hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
ff4f9107b70324b15eb1f65504d805a8793e0db4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calcSoftmaxBackwardGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dz_next_layer = NULL;
hipMalloc(&dz_next_layer, XSIZE*YSIZE);
float *dz_in = NULL;
hipMalloc(&dz_in, XSIZE*YSIZE);
float *dz = NULL;
hipMalloc(&dz, XSIZE*YSIZE);
unsigned int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
calcSoftmaxBackwardGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, dz_next_layer,dz_in,dz,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
calcSoftmaxBackwardGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, dz_next_layer,dz_in,dz,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
calcSoftmaxBackwardGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, dz_next_layer,dz_in,dz,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ff4f9107b70324b15eb1f65504d805a8793e0db4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calcSoftmaxBackwardGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dz_next_layer = NULL;
cudaMalloc(&dz_next_layer, XSIZE*YSIZE);
float *dz_in = NULL;
cudaMalloc(&dz_in, XSIZE*YSIZE);
float *dz = NULL;
cudaMalloc(&dz, XSIZE*YSIZE);
unsigned int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calcSoftmaxBackwardGPU<<<gridBlock,threadBlock>>>(dz_next_layer,dz_in,dz,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calcSoftmaxBackwardGPU<<<gridBlock,threadBlock>>>(dz_next_layer,dz_in,dz,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calcSoftmaxBackwardGPU<<<gridBlock,threadBlock>>>(dz_next_layer,dz_in,dz,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0ae70dd930f7dfd713680d085e3e962f43156b43.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/sgemm_traits.h"
#pragma warning( disable : 4503)
/*
This example demonstrates how to use cutlass to compute a batched strided gemm.
In this example, both A and B matrix are non-transpose and column major matrix
batched_C = batched_A x batched_B
As an example, matrix C can be seen as
-----------------------------------------------------------
(0,0,0) | (0,0,1) | (0,0,2) | (1,0,0) | (1,0,1) | (1,0,2) |
-----------------------------------------------------------
(0,1,0) | (0,1,1) | (0,1,2) | (1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------------------------------------
(0,2,0) | (0,2,1) | (0,2,2) | (1,2,0) | (1,2,1) | (1,2,2) |
-----------------------------------------------------------
(0,3,0) | (0,3,1) | (0,3,2) | (1,3,0) | (1,3,1) | (1,3,2) |
-----------------------------------------------------------
(0,4,0) | (0,4,1) | (0,4,2) | (1,4,0) | (1,4,1) | (1,4,2) |
-----------------------------------------------------------
(0,5,0) | (0,5,1) | (0,5,2) | (1,5,0) | (1,5,1) | (1,5,2) |
-----------------------------------------------------------
batch 0 | batch 1
where we denote each element with (batch_idx, row_idx, column_idx)
In this example, batch size is 2, M is 6 and N is 3
The stride (batch_stride_C) between the first element of two batches is ldc * n
matrix A can be seen as
---------------------------------------
(0,0,0) | (0,0,1) | (1,0,0) | (1,0,1) |
---------------------------------------
(0,1,0) | (0,1,1) | (1,1,0) | (1,1,1) |
---------------------------------------
(0,2,0) | (0,2,1) | (1,2,0) | (1,2,1) |
---------------------------------------
(0,3,0) | (0,3,1) | (1,3,0) | (1,3,1) |
---------------------------------------
(0,4,0) | (0,4,1) | (1,4,0) | (1,4,1) |
---------------------------------------
(0,5,0) | (0,5,1) | (1,5,0) | (1,5,1) |
---------------------------------------
batch 0 | batch 1
, where batch size is 2, M is 6 and K is 2
The stride (batch_stride_A) between the first element of two batches is lda * k
matrix B can be seen as
-----------------------------
(0,0,0) | (0,0,1) | (0,0,2) |
----------------------------- batch 0
(0,1,0) | (0,1,1) | (0,1,2) |
-------------------------------------
(1,0,0) | (1,0,1) | (1,0,2) |
----------------------------- batch 1
(1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------
, where the batch size is 2, N is 3 and K is 2
The stride (batch_stride_B) between the first element of two batches is k
*/
// Run a strided-batched SGEMM on the GPU using CUTLASS 1.x:
//   C[b] = alpha * A[b] * B[b] + beta * C[b]   for b in [0, batch_count),
// with A and B non-transposed and column-major.
// A/B/C are device pointers; lda/ldb/ldc are leading dimensions and
// batch_stride_* give the element distance between consecutive batches.
// Blocks on hipDeviceSynchronize() and returns hipGetLastError(), or
// hipErrorInvalidValue if CUTLASS rejects the problem configuration.
hipError_t cutlass_strided_batched_sgemm(float const *A,
int lda,
long long int batch_stride_A,
float const *B,
int ldb,
long long int batch_stride_B,
float *C,
int ldc,
long long int batch_stride_C,
float alpha,
float beta,
int m,
int n,
int k,
int batch_count) {
// create a cutlass traits
typedef cutlass::gemm::SgemmTraits<cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<8, 128, 128> >
SgemmTraits;
// create a CUTLASS GEMM object.
typedef cutlass::gemm::Gemm<SgemmTraits> Gemm;
// Construct and initialize CUTLASS GEMM parameters object.
typename Gemm::Params params;
// params.initialize returns 0 on success, non-zero on an invalid configuration.
int result = params.initialize(
m, // M dimension for each batch
n, // N dimension for each batch
k, // K dimension for each batch
alpha, // scalar alpha
A,
lda,
batch_stride_A, // distance in memory between the first element of neighboring batch
B,
ldb,
batch_stride_B, // distance in memory between the first element of neighboring batch
beta, // scalar beta
C, // source matrix C
ldc,
batch_stride_C, // distance in memory between the first element of neighboring batch
C, // destination matrix C (may be different memory than source C matrix)
ldc,
batch_stride_C, // distance in memory between the first element of neighboring batch
batch_count
);
if (result != 0) {
std::cerr << "Failed to initialize CUTLASS Gemm::Params object." << std::endl;
return hipErrorInvalidValue;
}
// Launch the CUTLASS GEMM kernel.
Gemm::launch(params);
// NOTE(review): `result` is an int receiving a hipError_t here; the
// comparison against hipSuccess below still works, but the types are mixed.
result = hipDeviceSynchronize();
if (result != hipSuccess) {
std::cerr << "kernel launch result = " << result << std::endl;
}
return hipGetLastError();
}
// Host (CPU) reference implementation of the strided batched GEMM with both
// operands non-transposed ("NN") and stored column-major:
//   C[b] = alpha * A[b] * B[b] + beta * C[b]   for each batch b.
// Buffer sizes are validated up front; returns hipErrorInvalidValue when a
// buffer is too small, hipSuccess otherwise. C is updated in place.
template<typename T>
hipError_t strided_batched_gemm_nn_reference(std::vector<T> const &A,
int lda,
long long int batch_stride_A,
std::vector<T> const &B,
int ldb,
long long int batch_stride_B,
std::vector<T> &C,
int ldc,
long long int batch_stride_C,
T alpha,
T beta,
int m,
int n,
int k,
int batch_count) {
/*
strided batched gemm NN
*/
hipError_t result = hipSuccess;
// NOTE(review): size() (unsigned) compared with a signed int product — fine
// for the positive sizes used by this test, but would misfire on overflow.
if (A.size() < lda * k * batch_count) {
std::cout << "the size of A is too small" << std::endl;
return hipErrorInvalidValue;
}
// B is shared along K (batch_stride_B = k in the caller), hence no
// batch_count factor in its size requirement.
if (B.size() < ldb * n) {
std::cout << "the size of B is too small" << std::endl;
return hipErrorInvalidValue;
}
if (C.size() < ldc * n * batch_count) {
std::cout << "the size of C is too small" << std::endl;
return hipErrorInvalidValue;
}
for (int batch_idx = 0; batch_idx < batch_count; batch_idx++) {
for (int n_idx = 0; n_idx < n; n_idx++) {
for (int m_idx = 0; m_idx < m; m_idx++) {
// Start from the beta-scaled destination, then accumulate alpha * (A row . B column).
T accum = beta * C[batch_idx * batch_stride_C + n_idx * ldc + m_idx];
for (int k_idx = 0; k_idx < k; k_idx++) {
accum += alpha
* A[batch_idx * batch_stride_A + k_idx * lda + m_idx]
* B[batch_idx * batch_stride_B + n_idx * ldb + k_idx];
}
C[batch_idx * batch_stride_C + n_idx * ldc + m_idx] = accum;
}
}
}
return result;
}
// Test driver: run the CUTLASS strided-batched SGEMM on the GPU, recompute
// the same product with the host reference, and compare for exact equality.
// Exit code 0 means pass; any non-zero value is a failure (HIP error code
// or mismatch).
int main() {
int const m = 16;
int const n = 24;
int const k = 8;
int const batch_count = 3;
// A, B are non-transpose, column major
int const lda = m;
// All B batches are packed along K into one ldb-tall column-major matrix,
// so ldb spans batch_count blocks of k rows each.
int const ldb = k * batch_count;
int const ldc = m;
int const count_A = batch_count * lda * k;
int const count_B = ldb * n;
int const count_C = batch_count * ldc * n;
// the memory is batched along K dimension
// (consecutive B batches are only k rows apart: batch_stride_B = k)
long long int batch_stride_A = static_cast<long long int>(lda) * static_cast<long long int>(k);
long long int batch_stride_B = static_cast<long long int>(k);
long long int batch_stride_C = static_cast<long long int>(ldc) * static_cast<long long int>(n);
// alpha and beta
// (beta = 2 with host_C filled with 1.0 below makes the result A*B + 2)
float alpha = 1.0f;
float beta = 2.0f;
hipError_t result = hipSuccess;
// allocate the host memory
std::vector<float> host_A(count_A);
std::vector<float> host_B(count_B);
std::vector<float> host_C(count_C);
std::vector<float> result_C(count_C);
// allocate the device memory
float *A;
float *B;
float *C;
result = hipMalloc(&A, count_A * sizeof(float));
if (result != hipSuccess) {
std::cerr << "hipMalloc result = " << result << std::endl;
return result;
}
result = hipMalloc(&B, count_B * sizeof(float));
if (result != hipSuccess) {
std::cerr << "hipMalloc result = " << result << std::endl;
return result;
}
result = hipMalloc(&C, count_C * sizeof(float));
if (result != hipSuccess) {
std::cerr << "hipMalloc result = " << result << std::endl;
return result;
}
// fill A
// (each element holds its own linear index, giving a deterministic pattern)
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < k; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_A[row_idx + col_idx * lda + b_idx * lda * k] = static_cast<float>(row_idx + col_idx * lda + b_idx * lda * k);
}
}
}
// fill B
// (a descending pattern; note the b_idx * k batch offset along rows)
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < k; row_idx++) {
host_B[row_idx + col_idx * ldb + b_idx * k] = static_cast<float>(n + k * ldb + batch_count * k) - static_cast<float>(row_idx + col_idx * ldb + b_idx * k);
}
}
}
// fill C
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_C[row_idx + col_idx * ldc + b_idx * ldc * n] = 1.f;
}
}
}
// ref memory
// (copies taken before the GPU run so the host reference sees identical input)
std::vector<float> ref_A(host_A);
std::vector<float> ref_B(host_B);
std::vector<float> ref_C(host_C);
// copy host memory to device
result = hipMemcpy(A, host_A.data(), count_A * sizeof(float), hipMemcpyHostToDevice);
if (result != hipSuccess) {
std::cerr << "hipMemcpy result = " << result << std::endl;
return result;
}
result = hipMemcpy(B, host_B.data(), count_B * sizeof(float), hipMemcpyHostToDevice);
if (result != hipSuccess) {
std::cerr << "hipMemcpy result = " << result << std::endl;
return result;
}
result = hipMemcpy(C, host_C.data(), count_C * sizeof(float), hipMemcpyHostToDevice);
if (result != hipSuccess) {
std::cerr << "hipMemcpy result = " << result << std::endl;
return result;
}
// run cutlass
result = cutlass_strided_batched_sgemm(A, lda, batch_stride_A, B, ldb, batch_stride_B, C, ldc, batch_stride_C,
alpha, beta, m, n, k, batch_count);
if (result != hipSuccess)
return result;
// copy device memory to host
result = hipMemcpy(result_C.data(), C, count_C * sizeof(float), hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "hipMemcpy result = " << result << std::endl;
return result;
}
//compare with reference code
result = strided_batched_gemm_nn_reference(ref_A, lda, batch_stride_A, ref_B, ldb, batch_stride_B, ref_C, ldc, batch_stride_C,
alpha, beta, m, n, k, batch_count);
if (result != 0)
return result;
// Exact float comparison: both sides use the same small integer-valued
// inputs, so GPU and CPU results are expected to match bit-for-bit.
if (ref_C != result_C) {
std::cout << "CUTLASS strided batched gemm does not run correctly" << std::endl;
return hipErrorUnknown;
}
// free memory
result = hipFree(A);
if (result != hipSuccess) {
std::cerr << "hipFree result = " << result << std::endl;
return result;
}
result = hipFree(B);
if (result != hipSuccess) {
std::cerr << "hipFree result = " << result << std::endl;
return result;
}
result = hipFree(C);
if (result != hipSuccess) {
std::cerr << "hipFree result = " << result << std::endl;
return result;
}
if (result == hipSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == hipSuccess ? 0 : -1;
}
| 0ae70dd930f7dfd713680d085e3e962f43156b43.cu | /***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/sgemm_traits.h"
#pragma warning( disable : 4503)
/*
This example demonstrates how to use cutlass to compute a batched strided gemm.
In this example, both A and B matrix are non-transpose and column major matrix
batched_C = batched_A x batched_B
As an example, matrix C can be seen as
-----------------------------------------------------------
(0,0,0) | (0,0,1) | (0,0,2) | (1,0,0) | (1,0,1) | (1,0,2) |
-----------------------------------------------------------
(0,1,0) | (0,1,1) | (0,1,2) | (1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------------------------------------
(0,2,0) | (0,2,1) | (0,2,2) | (1,2,0) | (1,2,1) | (1,2,2) |
-----------------------------------------------------------
(0,3,0) | (0,3,1) | (0,3,2) | (1,3,0) | (1,3,1) | (1,3,2) |
-----------------------------------------------------------
(0,4,0) | (0,4,1) | (0,4,2) | (1,4,0) | (1,4,1) | (1,4,2) |
-----------------------------------------------------------
(0,5,0) | (0,5,1) | (0,5,2) | (1,5,0) | (1,5,1) | (1,5,2) |
-----------------------------------------------------------
batch 0 | batch 1
where we denote each element with (batch_idx, row_idx, column_idx)
In this example, batch size is 2, M is 6 and N is 3
The stride (batch_stride_C) between the first element of two batches is ldc * n
matrix A can be seen as
---------------------------------------
(0,0,0) | (0,0,1) | (1,0,0) | (1,0,1) |
---------------------------------------
(0,1,0) | (0,1,1) | (1,1,0) | (1,1,1) |
---------------------------------------
(0,2,0) | (0,2,1) | (1,2,0) | (1,2,1) |
---------------------------------------
(0,3,0) | (0,3,1) | (1,3,0) | (1,3,1) |
---------------------------------------
(0,4,0) | (0,4,1) | (1,4,0) | (1,4,1) |
---------------------------------------
(0,5,0) | (0,5,1) | (1,5,0) | (1,5,1) |
---------------------------------------
batch 0 | batch 1
, where batch size is 2, M is 6 and K is 2
The stride (batch_stride_A) between the first element of two batches is lda * k
matrix B can be seen as
-----------------------------
(0,0,0) | (0,0,1) | (0,0,2) |
----------------------------- batch 0
(0,1,0) | (0,1,1) | (0,1,2) |
-------------------------------------
(1,0,0) | (1,0,1) | (1,0,2) |
----------------------------- batch 1
(1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------
, where the batch size is 2, N is 3 and K is 2
The stride (batch_stride_B) between the first element of two batches is k
*/
// Run a strided-batched SGEMM on the GPU using CUTLASS 1.x:
//   C[b] = alpha * A[b] * B[b] + beta * C[b]   for b in [0, batch_count),
// with A and B non-transposed and column-major.
// A/B/C are device pointers; lda/ldb/ldc are leading dimensions and
// batch_stride_* give the element distance between consecutive batches.
// Blocks on cudaDeviceSynchronize() and returns cudaGetLastError(), or
// cudaErrorInvalidValue if CUTLASS rejects the problem configuration.
cudaError_t cutlass_strided_batched_sgemm(float const *A,
int lda,
long long int batch_stride_A,
float const *B,
int ldb,
long long int batch_stride_B,
float *C,
int ldc,
long long int batch_stride_C,
float alpha,
float beta,
int m,
int n,
int k,
int batch_count) {
// create a cutlass traits
typedef cutlass::gemm::SgemmTraits<cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor, cutlass::Shape<8, 128, 128> >
SgemmTraits;
// create a CUTLASS GEMM object.
typedef cutlass::gemm::Gemm<SgemmTraits> Gemm;
// Construct and initialize CUTLASS GEMM parameters object.
typename Gemm::Params params;
// params.initialize returns 0 on success, non-zero on an invalid configuration.
int result = params.initialize(
m, // M dimension for each batch
n, // N dimension for each batch
k, // K dimension for each batch
alpha, // scalar alpha
A,
lda,
batch_stride_A, // distance in memory between the first element of neighboring batch
B,
ldb,
batch_stride_B, // distance in memory between the first element of neighboring batch
beta, // scalar beta
C, // source matrix C
ldc,
batch_stride_C, // distance in memory between the first element of neighboring batch
C, // destination matrix C (may be different memory than source C matrix)
ldc,
batch_stride_C, // distance in memory between the first element of neighboring batch
batch_count
);
if (result != 0) {
std::cerr << "Failed to initialize CUTLASS Gemm::Params object." << std::endl;
return cudaErrorInvalidValue;
}
// Launch the CUTLASS GEMM kernel.
Gemm::launch(params);
// NOTE(review): `result` is an int receiving a cudaError_t here; the
// comparison against cudaSuccess below still works, but the types are mixed.
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "kernel launch result = " << result << std::endl;
}
return cudaGetLastError();
}
// Host-side reference for a strided batched GEMM with both operands
// non-transposed ("NN") and stored column-major:
//   C[b] = alpha * A[b] * B[b] + beta * C[b]   for b in [0, batch_count).
// Buffer sizes are validated first; returns cudaErrorInvalidValue when a
// buffer is too small, cudaSuccess otherwise. C is updated in place.
template<typename T>
cudaError_t strided_batched_gemm_nn_reference(std::vector<T> const &A,
                                              int lda,
                                              long long int batch_stride_A,
                                              std::vector<T> const &B,
                                              int ldb,
                                              long long int batch_stride_B,
                                              std::vector<T> &C,
                                              int ldc,
                                              long long int batch_stride_C,
                                              T alpha,
                                              T beta,
                                              int m,
                                              int n,
                                              int k,
                                              int batch_count) {
    cudaError_t status = cudaSuccess;
    if (A.size() < lda * k * batch_count) {
        std::cout << "the size of A is too small" << std::endl;
        return cudaErrorInvalidValue;
    }
    if (B.size() < ldb * n) {
        std::cout << "the size of B is too small" << std::endl;
        return cudaErrorInvalidValue;
    }
    if (C.size() < ldc * n * batch_count) {
        std::cout << "the size of C is too small" << std::endl;
        return cudaErrorInvalidValue;
    }
    for (int b = 0; b < batch_count; ++b) {
        // Hoist the per-batch base offsets out of the inner loops.
        long long int baseA = b * batch_stride_A;
        long long int baseB = b * batch_stride_B;
        long long int baseC = b * batch_stride_C;
        for (int col = 0; col < n; ++col) {
            for (int row = 0; row < m; ++row) {
                // Start from the beta-scaled destination, then accumulate
                // alpha * (row of A . column of B) in the same order as the GPU test input.
                T value = beta * C[baseC + col * ldc + row];
                for (int kk = 0; kk < k; ++kk) {
                    value += alpha
                        * A[baseA + kk * lda + row]
                        * B[baseB + col * ldb + kk];
                }
                C[baseC + col * ldc + row] = value;
            }
        }
    }
    return status;
}
// Test driver: run the CUTLASS strided-batched SGEMM on the GPU, recompute
// the same product with the host reference, and compare for exact equality.
// Exit code 0 means pass; any non-zero value is a failure (CUDA error code
// or mismatch).
int main() {
int const m = 16;
int const n = 24;
int const k = 8;
int const batch_count = 3;
// A, B are non-transpose, column major
int const lda = m;
// All B batches are packed along K into one ldb-tall column-major matrix,
// so ldb spans batch_count blocks of k rows each.
int const ldb = k * batch_count;
int const ldc = m;
int const count_A = batch_count * lda * k;
int const count_B = ldb * n;
int const count_C = batch_count * ldc * n;
// the memory is batched along K dimension
// (consecutive B batches are only k rows apart: batch_stride_B = k)
long long int batch_stride_A = static_cast<long long int>(lda) * static_cast<long long int>(k);
long long int batch_stride_B = static_cast<long long int>(k);
long long int batch_stride_C = static_cast<long long int>(ldc) * static_cast<long long int>(n);
// alpha and beta
// (beta = 2 with host_C filled with 1.0 below makes the result A*B + 2)
float alpha = 1.0f;
float beta = 2.0f;
cudaError_t result = cudaSuccess;
// allocate the host memory
std::vector<float> host_A(count_A);
std::vector<float> host_B(count_B);
std::vector<float> host_C(count_C);
std::vector<float> result_C(count_C);
// allocate the device memory
float *A;
float *B;
float *C;
result = cudaMalloc(&A, count_A * sizeof(float));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
result = cudaMalloc(&B, count_B * sizeof(float));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
result = cudaMalloc(&C, count_C * sizeof(float));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
// fill A
// (each element holds its own linear index, giving a deterministic pattern)
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < k; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_A[row_idx + col_idx * lda + b_idx * lda * k] = static_cast<float>(row_idx + col_idx * lda + b_idx * lda * k);
}
}
}
// fill B
// (a descending pattern; note the b_idx * k batch offset along rows)
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < k; row_idx++) {
host_B[row_idx + col_idx * ldb + b_idx * k] = static_cast<float>(n + k * ldb + batch_count * k) - static_cast<float>(row_idx + col_idx * ldb + b_idx * k);
}
}
}
// fill C
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_C[row_idx + col_idx * ldc + b_idx * ldc * n] = 1.f;
}
}
}
// ref memory
// (copies taken before the GPU run so the host reference sees identical input)
std::vector<float> ref_A(host_A);
std::vector<float> ref_B(host_B);
std::vector<float> ref_C(host_C);
// copy host memory to device
result = cudaMemcpy(A, host_A.data(), count_A * sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
result = cudaMemcpy(B, host_B.data(), count_B * sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
result = cudaMemcpy(C, host_C.data(), count_C * sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
// run cutlass
result = cutlass_strided_batched_sgemm(A, lda, batch_stride_A, B, ldb, batch_stride_B, C, ldc, batch_stride_C,
alpha, beta, m, n, k, batch_count);
if (result != cudaSuccess)
return result;
// copy device memory to host
result = cudaMemcpy(result_C.data(), C, count_C * sizeof(float), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
//compare with reference code
result = strided_batched_gemm_nn_reference(ref_A, lda, batch_stride_A, ref_B, ldb, batch_stride_B, ref_C, ldc, batch_stride_C,
alpha, beta, m, n, k, batch_count);
if (result != 0)
return result;
// Exact float comparison: both sides use the same small integer-valued
// inputs, so GPU and CPU results are expected to match bit-for-bit.
if (ref_C != result_C) {
std::cout << "CUTLASS strided batched gemm does not run correctly" << std::endl;
return cudaErrorUnknown;
}
// free memory
result = cudaFree(A);
if (result != cudaSuccess) {
std::cerr << "cudaFree result = " << result << std::endl;
return result;
}
result = cudaFree(B);
if (result != cudaSuccess) {
std::cerr << "cudaFree result = " << result << std::endl;
return result;
}
result = cudaFree(C);
if (result != cudaSuccess) {
std::cerr << "cudaFree result = " << result << std::endl;
return result;
}
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
|
fad9f2d15619798baca63ffebd14012262376d53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
{
// Element-wise gated accumulate: c[i] += a[i] wherever b[i] > 0.
// One thread per element; launch with at least n total threads.
__global__ void A_emult_Bg0(const int n, const double *a, const double *b, double *c)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard against the partial block at the grid tail, and skip the
    // accumulation entirely when the gate is closed. The original
    // 'else { c[i] += 0.0; }' was a no-op that still forced a
    // read-modify-write of c[i]; it has been removed.
    if (i < n && b[i] > 0.0)
    {
        c[i] += a[i];
    }
}
} | fad9f2d15619798baca63ffebd14012262376d53.cu | extern "C"
{
__global__ void A_emult_Bg0(const int n, const double *a, const double *b, double *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<n)
{
if (b[i]>0.0)
{c[i] += a[i];}
else
{c[i] += 0.0;}
}
}
} |
03eef736bdfaf4f7a58f4272f4c554447a487ecc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab2.h"
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 240;
#define IX(i,j) ((i)+(N+2)*(j))
/*
This stable fluid solver is provided by Jos Stam
In this lab I use the following two functions:
vel_step ( N, u, v, u_prev, v_prev, visc, dt );
dens_step ( N, dens, dens_prev, u, v, diff, dt );
I read the code and modify the display function
"drawDensityCuda", draw the density in dens array
with GPU acceleration.
Reference: Stam, Jos. "Real-time fluid dynamics for games." Proceedings of the game developer conference. Vol. 18. 2003.
*/
#include "solver.c"
static int N = 128;
static float dt=0.1f, diff=0.0f, visc=0.0f;
static float force=5.0f, source=1500.0f;
static float * u, * v, * u_prev, * v_prev;
static float * dens, * dens_prev;
int convert(int r,int c) {
return c * W + r;
}
// Allocate the six solver fields (velocity u/v, their previous-step
// counterparts, density and previous density) for an (N+2)x(N+2) grid and
// zero-initialize all of them. Logs to stderr if any allocation fails.
void init() {
    int size = (N+2)*(N+2);
    u = (float *) malloc ( size*sizeof(float) );
    v = (float *) malloc ( size*sizeof(float) );
    u_prev = (float *) malloc ( size*sizeof(float) );
    v_prev = (float *) malloc ( size*sizeof(float) );
    dens = (float *) malloc ( size*sizeof(float) );
    dens_prev = (float *) malloc ( size*sizeof(float) );
    if ( !u || !v || !u_prev || !v_prev || !dens || !dens_prev ) {
        fprintf ( stderr, "cannot allocate data\n" );
    }
    // Zero every field: the solver and renderer read all six arrays, so none
    // may start as malloc garbage. (The original cleared only u, v and
    // dens_prev, leaving u_prev, v_prev and dens uninitialized.)
    for ( int i=0 ; i<size ; i++ ) {
        u[i] = v[i] = u_prev[i] = v_prev[i] = dens[i] = dens_prev[i] = 0.0f;
    }
}
// Reset the per-step source fields (d, u, v), then inject a single impulse:
// at window pixel (xx, yy) deposit `source` density into d and a force of
// (force*forcex, force*forcey) into u/v, mapped to the corresponding grid
// cell. (xx, yy) == (0, *) or (*, 0) and out-of-grid positions inject nothing.
void animate_parameter(float * d, float * u, float * v,int xx,int yy, int forcex, int forcey) {
int i, j, size = (N+2)*(N+2);
// The solver consumes these arrays as per-step input, so clear them first.
for ( i=0 ; i<size ; i++ ) {
u[i] = v[i] = d[i] = 0.0f;
}
if(xx==0||yy==0) return;
// Map window pixel coordinates to grid cell indices (the y axis is flipped).
i = (int)(( xx /(float)W)*N+1);
j = (int)(((H-yy)/(float)H)*N+1);
if ( i<1 || i>N || j<1 || j>N ) return;
u[IX(i,j)] = force * forcex;
v[IX(i,j)] = force * forcey;
d[IX(i,j)] = source;
return;
}
// Scripted animation for frame t: inject impulses at hard-coded positions
// during selected frame ranges, then advance the velocity and density fields
// one time step with Stam's solver.
void animate(int t) {
if (t >= 1 && t<=2){
animate_parameter ( dens_prev, u_prev, v_prev, 100,100,50,50 );
} else if (t >=3 && t<=5) {
animate_parameter ( dens_prev, u_prev, v_prev, 400,100,-50,-50 );
} else if (t >= 20 && t < 80) {
// Upward-growing force: the y component strengthens with t.
animate_parameter ( dens_prev, u_prev, v_prev, 120,250,30,-20-(t-30) );
} else if (t >= 120 && t < 150) {
// Intentionally empty: disabled experiment kept for reference.
//animate_parameter ( dens_prev, u_prev, v_prev, 220,230,30,-20-(t-30) );
}else if (t >= 180 && t<= 190){
//animate_parameter ( dens_prev, u_prev, v_prev, 220,230,30,-20-(t-30) );
}
else {
// (0,0) is rejected inside animate_parameter, so this only clears the
// source fields without injecting anything.
animate_parameter ( dens_prev, u_prev, v_prev, 0,0,10,10 );
}
vel_step ( N, u, v, u_prev, v_prev, visc, dt );
dens_step ( N, dens, dens_prev, u, v, diff, dt );
}
// Apply a fixed force (10x the base force in both axes) at the grid cell
// under window pixel (xx, yy); out-of-grid positions are ignored.
void add_force(int xx, int yy){
int i,j;
i = (int)(( xx /(float)W)*N+1);
j = (int)(((H-yy)/(float)H)*N+1);
if ( i<1 || i>N || j<1 || j>N ) return;
u[IX(i,j)] = force * 10;
v[IX(i,j)] = force * 10;
}
// Deposit `source` density at the grid cell under window pixel (mx, my).
// NOTE(review): unlike add_force there is no bounds check here — confirm
// callers never pass coordinates outside the window.
void add_dens(int mx, int my) {
int i = (int)(( mx /(float)W)*N+1);
int j = (int)(((H-my)/(float)H)*N+1);
dens_prev[IX(i,j)] = source;
// Debug trace of the injected cell and value.
printf("add: %d,%d,%f\n",i,j,dens_prev[IX(i,j)]);
}
// Private state of the video generator.
struct Lab2VideoGenerator::Impl {
int t = 0;     // index of the next frame to generate
int posx = 0;  // not used in this file; presumably leftover cursor state — TODO confirm
int posy=0;    // not used in this file; presumably leftover cursor state — TODO confirm
};
// Construct the generator with a fresh Impl (frame counter starts at 0).
Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
}
// NOTE(review): the fluid buffers malloc'ed in init() are never freed here —
// acceptable for a run-to-completion demo, but worth confirming.
Lab2VideoGenerator::~Lab2VideoGenerator() {}
// Report the output video parameters: W x H frames, NFRAME frames total,
// at a fixed 24 fps.
void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
// Host-side pixel painter: map a density value to an 8-bit luma level
// (scaled by 25500, saturating at 255) and write a 2-pixel strip on each of
// two adjacent rows at (x, y).
// NOTE(review): `yuv` is written via hipMemset, so it appears to be device
// memory — one memset per pixel pair is very slow; the GPU path
// drawDensityCuda supersedes this routine.
void drawPoint(uint8_t *yuv, int x, int y,float color) {
float RGBcolor = color*25500 > 255 ? 255: color*25500;
//printf("%f ",RGBcolor);
for(int i = y; i < y + 2; i++) {
int pos = convert(x, i);
hipMemset(yuv + pos, int(RGBcolor), 2);
}
}
// Render the density field into the Y (luma) plane of a W x H yuv frame.
// One thread per grid cell; launch with a 2D grid covering at least
// (N+1) x (N+1) threads.
// NOTE(review): N is hard-coded to 128 here and must stay in sync with the
// host-side solver resolution — confirm if that resolution ever changes.
__global__ void drawDensityCuda(uint8_t *yuv, float* dens, int W, int H){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int N = 128;
    if (i > N || j > N) return;
    // Cell center in normalized [0,1] coordinates (y axis flipped below).
    // The unused corner samples (d01, d10, d11) from the CPU version were removed.
    float h = 1.0f/N;
    float x = (i-0.5f)*h;
    float y = (j-0.5f)*h;
    float d00 = dens[IX(i,j)];
    // Scale density to an 8-bit luma value, saturating at white.
    float RGBcolor = d00*25500 > 255 ? 255: d00*25500;
    int yy = H-int(y*H);
    // Paint a 2x2 pixel block for this cell.
    for(int ii = yy; ii < yy + 2; ii++) {
        int pos = int(x*W)+ii*W;
        yuv[pos] = int(RGBcolor);
        yuv[pos+1] = int(RGBcolor);
    }
}
// CPU fallback renderer: walk every grid cell and paint its density via
// drawPoint. Kept for reference; Generate() uses the CUDA kernel instead.
void drawDensity(uint8_t *yuv) {
int i, j;
float x, y, h, d00, d01, d10, d11;
h = 1.0f/N;
for ( i=0 ; i<=N ; i++ ) {
x = (i-0.5f)*h;
for ( j=0 ; j<=N ; j++ ) {
y = (j-0.5f)*h;
d00 = dens[IX(i,j)];
// Neighboring corner samples below are disabled; only d00 is drawn.
//d01 = dens[IX(i,j+1)];
//d10 = dens[IX(i+1,j)];
//d11 = dens[IX(i+1,j+1)];
drawPoint(yuv, int(x*W), H-int(y*H), d00);
//drawPoint(yuv, int((x+h)*W), H-int(y*H), d10);
//drawPoint(yuv, int((x+h)*W), H-int((y+h)*H), d11);
//drawPoint(yuv, int(x*W), H-int((y+h)*H), d01);
}
}
}
// Carve zero-density strokes into the field: one vertical strip at x=30
// (rows 10..41) and three horizontal strips at y=10/25/40 (x=20..29).
// NOTE(review): together these look like a glyph (an "E"-like shape) that the
// smoke flows around -- confirm against the rendered output.
void changeDens(){
int i,j; // j is unused; kept to preserve the original byte-for-byte
int index = 30;
for(i=10;i<42;i++){
dens[IX(index,i)] = 0.0f; // vertical stroke at column 30
}
index = 20;
for(i=index;i<30;i++){
dens[IX(i,10)] = 0.0f; // bottom horizontal stroke
}
for(i=index;i<30;i++){
dens[IX(i,25)] = 0.0f; // middle horizontal stroke
}
for(i=index;i<30;i++){
dens[IX(i,40)] = 0.0f; // top horizontal stroke
}
}
// Variant of changeDens used for frames 120..179: the vertical stroke is
// split -- column 30 covers rows 24..41 while column 20 covers rows 10..23 --
// plus the same three horizontal strips at y=10/25/40 (x=20..29).
void changeDens2(){
int i,j; // j is unused; kept to preserve the original byte-for-byte
int index = 30;
for(i=24;i<42;i++){
dens[IX(index,i)] = 0.0f; // upper part of the vertical stroke, column 30
}
index = 20;
for(i=10;i<24;i++){
dens[IX(index,i)] = 0.0f; // lower part of the vertical stroke, column 20
}
for(i=index;i<30;i++){
dens[IX(i,10)] = 0.0f; // bottom horizontal stroke
}
for(i=index;i<30;i++){
dens[IX(i,25)] = 0.0f; // middle horizontal stroke
}
for(i=index;i<30;i++){
dens[IX(i,40)] = 0.0f; // top horizontal stroke
}
}
/* Zero out a single vertical density strip: grid column 30, rows 10..41. */
void changeDens1(){
const int col = 30;
for (int row = 10; row < 42; ++row)
dens[IX(col, row)] = 0.0f;
}
/*
 * Produce one YUV420 frame: step the fluid solver, upload the density field
 * and render it into the luma plane with drawDensityCuda.
 * Fix: the original hipMalloc'd d_dens every frame and never freed it,
 * leaking size*sizeof(float) of device memory per frame.
 */
void Lab2VideoGenerator::Generate(uint8_t *yuv) {
int size = (N+2)*(N+2);
// First frame: clear the luma plane and allocate/zero the simulation fields.
if (impl->t == 0) {
hipMemset(yuv,0,W*H);
init();
}
printf("%d\n",impl->t);
// Carve the frame-dependent obstacle/glyph pattern into the density field.
if(impl->t < 120){
changeDens();
} else if (impl->t < 180) {
changeDens2();
} else if (impl->t < 260){
changeDens1();
}
// Upload the density field and render it into the luma plane on the GPU.
float *d_dens;
hipMalloc(&d_dens, size*sizeof(float));
hipMemcpy(d_dens, dens, size*sizeof(float), hipMemcpyHostToDevice);
dim3 block(16,16);
dim3 grid ( N/16, N/16 );
hipLaunchKernelGGL(( drawDensityCuda), dim3(grid),dim3(block), 0, 0, yuv,d_dens,W,H);
// Advance the fluid simulation for the next frame (host-side solver).
animate(impl->t);
// Fix: release the per-frame device buffer. Synchronize first so the
// kernel has finished reading d_dens.
hipDeviceSynchronize();
hipFree(d_dens);
// Neutral chroma (U/V = 128) -> grayscale output.
hipMemset(yuv+W*H, 128, W*H/2);
++(impl->t);
}
| 03eef736bdfaf4f7a58f4272f4c554447a487ecc.cu | #include "lab2.h"
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 240;
#define IX(i,j) ((i)+(N+2)*(j))
/*
This stable fluid solver is provided by Jos Stam
In this lab I use the following twp function
vel_step ( N, u, v, u_prev, v_prev, visc, dt );
dens_step ( N, dens, dens_prev, u, v, diff, dt );
I read the code and modify the display function
"drawDensityCuda", draw the density in dens array
with GPU acceleration.
Reference: Stam, Jos. "Real-time fluid dynamics for games." Proceedings of the game developer conference. Vol. 18. 2003.
*/
#include "solver.c"
static int N = 128;
static float dt=0.1f, diff=0.0f, visc=0.0f;
static float force=5.0f, source=1500.0f;
static float * u, * v, * u_prev, * v_prev;
static float * dens, * dens_prev;
int convert(int r,int c) {
return c * W + r;
}
/*
 * Allocate and zero all host-side simulation fields for an (N+2)x(N+2) grid.
 * Fixes: (1) the original only cleared u, v and dens_prev, leaving u_prev,
 * v_prev and dens uninitialized for the first solver step; (2) on a failed
 * malloc it printed a message and then fell through to write via the NULL
 * pointers -- now it returns early instead.
 */
void init() {
int size = (N+2)*(N+2);
u = (float *) malloc ( size*sizeof(float) );
v = (float *) malloc ( size*sizeof(float) );
u_prev = (float *) malloc ( size*sizeof(float) );
v_prev = (float *) malloc ( size*sizeof(float) );
dens = (float *) malloc ( size*sizeof(float) );
dens_prev = (float *) malloc ( size*sizeof(float) );
if ( !u || !v || !u_prev || !v_prev || !dens || !dens_prev ) {
fprintf ( stderr, "cannot allocate data\n" );
return; // don't touch NULL buffers; the caller's later use will still fail loudly
}
// Zero every field so the first vel_step/dens_step sees a clean state.
for ( int i=0 ; i<size ; i++ ) {
u[i] = v[i] = u_prev[i] = v_prev[i] = 0.0f;
dens[i] = dens_prev[i] = 0.0f;
}
}
void animate_parameter(float * d, float * u, float * v,int xx,int yy, int forcex, int forcey) {
int i, j, size = (N+2)*(N+2);
for ( i=0 ; i<size ; i++ ) {
u[i] = v[i] = d[i] = 0.0f;
}
if(xx==0||yy==0) return;
i = (int)(( xx /(float)W)*N+1);
j = (int)(((H-yy)/(float)H)*N+1);
if ( i<1 || i>N || j<1 || j>N ) return;
u[IX(i,j)] = force * forcex;
v[IX(i,j)] = force * forcey;
d[IX(i,j)] = source;
return;
}
void animate(int t) {
if (t >= 1 && t<=2){
animate_parameter ( dens_prev, u_prev, v_prev, 100,100,50,50 );
} else if (t >=3 && t<=5) {
animate_parameter ( dens_prev, u_prev, v_prev, 400,100,-50,-50 );
} else if (t >= 20 && t < 80) {
animate_parameter ( dens_prev, u_prev, v_prev, 120,250,30,-20-(t-30) );
} else if (t >= 120 && t < 150) {
//animate_parameter ( dens_prev, u_prev, v_prev, 220,230,30,-20-(t-30) );
}else if (t >= 180 && t<= 190){
//animate_parameter ( dens_prev, u_prev, v_prev, 220,230,30,-20-(t-30) );
}
else {
animate_parameter ( dens_prev, u_prev, v_prev, 0,0,10,10 );
}
vel_step ( N, u, v, u_prev, v_prev, visc, dt );
dens_step ( N, dens, dens_prev, u, v, diff, dt );
}
void add_force(int xx, int yy){
int i,j;
i = (int)(( xx /(float)W)*N+1);
j = (int)(((H-yy)/(float)H)*N+1);
if ( i<1 || i>N || j<1 || j>N ) return;
u[IX(i,j)] = force * 10;
v[IX(i,j)] = force * 10;
}
void add_dens(int mx, int my) {
int i = (int)(( mx /(float)W)*N+1);
int j = (int)(((H-my)/(float)H)*N+1);
dens_prev[IX(i,j)] = source;
printf("add: %d,%d,%f\n",i,j,dens_prev[IX(i,j)]);
}
struct Lab2VideoGenerator::Impl {
int t = 0;
int posx = 0;
int posy=0;
};
Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
}
Lab2VideoGenerator::~Lab2VideoGenerator() {}
void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
/*
 * Host-side plot of a 2x2 patch into the device luma plane at (x, y),
 * brightness derived from 'color' (clamped to 255).
 * Fix: bounds-check x and the two target rows -- drawDensity can pass
 * negative or >=H coordinates for border cells, which previously issued
 * out-of-range device writes.
 * Note: one cudaMemset per row pair is very slow; drawDensityCuda is the
 * fast path.
 */
void drawPoint(uint8_t *yuv, int x, int y,float color) {
float RGBcolor = color*25500 > 255 ? 255: color*25500;
if (x < 0 || x >= (int)W) return; // column outside the frame
for(int i = y; i < y + 2; i++) {
if (i < 0 || i >= (int)H) continue; // row outside the frame
int pos = convert(x, i);
cudaMemset(yuv + pos, int(RGBcolor), 2);
}
}
/*
 * Render the (N+2)x(N+2) density field into the W x H luma plane: one thread
 * per grid cell, each writing a 2x1 pixel pair on two consecutive rows.
 * Launch layout: 2-D grid of 2-D blocks covering at least N x N threads.
 * Fix: clamp the pixel coordinates -- for i==0 / j==0 the original computed a
 * negative x pixel and a row index of H or H+1, writing out of bounds.
 */
__global__ void drawDensityCuda(uint8_t *yuv, float* dens, int W, int H){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int N = 128; // must match the host-side grid size; IX() uses N+2 stride
if (i > N || j > N) return;
float h = 1.0f/N;
float x = (i-0.5f)*h; // cell center in [0,1] space
float y = (j-0.5f)*h;
float d00 = dens[IX(i,j)];
float RGBcolor = d00*25500 > 255 ? 255 : d00*25500;
int px = int(x*W);
if (px < 0 || px + 1 >= W) return; // writes touch columns px and px+1
int yy = H-int(y*H);
for(int ii = yy; ii < yy + 2; ii++) {
if (ii < 0 || ii >= H) continue; // row outside the frame
int pos = px + ii*W;
yuv[pos] = int(RGBcolor);
yuv[pos+1] = int(RGBcolor);
}
}
void drawDensity(uint8_t *yuv) {
int i, j;
float x, y, h, d00, d01, d10, d11;
h = 1.0f/N;
for ( i=0 ; i<=N ; i++ ) {
x = (i-0.5f)*h;
for ( j=0 ; j<=N ; j++ ) {
y = (j-0.5f)*h;
d00 = dens[IX(i,j)];
//d01 = dens[IX(i,j+1)];
//d10 = dens[IX(i+1,j)];
//d11 = dens[IX(i+1,j+1)];
drawPoint(yuv, int(x*W), H-int(y*H), d00);
//drawPoint(yuv, int((x+h)*W), H-int(y*H), d10);
//drawPoint(yuv, int((x+h)*W), H-int((y+h)*H), d11);
//drawPoint(yuv, int(x*W), H-int((y+h)*H), d01);
}
}
}
void changeDens(){
int i,j;
int index = 30;
for(i=10;i<42;i++){
dens[IX(index,i)] = 0.0f;
}
index = 20;
for(i=index;i<30;i++){
dens[IX(i,10)] = 0.0f;
}
for(i=index;i<30;i++){
dens[IX(i,25)] = 0.0f;
}
for(i=index;i<30;i++){
dens[IX(i,40)] = 0.0f;
}
}
void changeDens2(){
int i,j;
int index = 30;
for(i=24;i<42;i++){
dens[IX(index,i)] = 0.0f;
}
index = 20;
for(i=10;i<24;i++){
dens[IX(index,i)] = 0.0f;
}
for(i=index;i<30;i++){
dens[IX(i,10)] = 0.0f;
}
for(i=index;i<30;i++){
dens[IX(i,25)] = 0.0f;
}
for(i=index;i<30;i++){
dens[IX(i,40)] = 0.0f;
}
}
void changeDens1(){
int i,j;
int index = 30;
for(i=10;i<42;i++){
dens[IX(index,i)] = 0.0f;
}
}
/*
 * Produce one YUV420 frame: step the fluid solver, upload the density field
 * and render it into the luma plane with drawDensityCuda.
 * Fix: the original cudaMalloc'd d_dens every frame and never freed it,
 * leaking size*sizeof(float) of device memory per frame.
 */
void Lab2VideoGenerator::Generate(uint8_t *yuv) {
int size = (N+2)*(N+2);
// First frame: clear the luma plane and allocate/zero the simulation fields.
if (impl->t == 0) {
cudaMemset(yuv,0,W*H);
init();
}
printf("%d\n",impl->t);
// Carve the frame-dependent obstacle/glyph pattern into the density field.
if(impl->t < 120){
changeDens();
} else if (impl->t < 180) {
changeDens2();
} else if (impl->t < 260){
changeDens1();
}
// Upload the density field and render it into the luma plane on the GPU.
float *d_dens;
cudaMalloc(&d_dens, size*sizeof(float));
cudaMemcpy(d_dens, dens, size*sizeof(float), cudaMemcpyHostToDevice);
dim3 block(16,16);
dim3 grid ( N/16, N/16 );
drawDensityCuda<<<grid,block>>>(yuv,d_dens,W,H);
// Advance the fluid simulation for the next frame (host-side solver).
animate(impl->t);
// Fix: release the per-frame device buffer. Synchronize first so the
// kernel has finished reading d_dens.
cudaDeviceSynchronize();
cudaFree(d_dens);
// Neutral chroma (U/V = 128) -> grayscale output.
cudaMemset(yuv+W*H, 128, W*H/2);
++(impl->t);
}
|
067a156399fbb3c6d5510962cf21320f91233741.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include <stdint.h>
inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); }
/*
 * Decode one YOLO detection head: for each grid cell (x_id, y_id) and anchor
 * z_id, convert the raw network output into a box (center x/y in input-image
 * pixels, w/h scaled by the masked anchor), an objectness-weighted best-class
 * score and the class index.
 * Launch layout: 3-D grid of 3-D blocks covering (gridSizeX, gridSizeY, numBBoxes).
 * Input layout per anchor is planar: [anchor][5 + numOutputClasses][cell].
 */
__global__ void gpuYoloLayer(const float* input, float* boxes, float* scores, float* classes, const uint netWidth,
const uint netHeight, const uint gridSizeX, const uint gridSizeY, const uint numOutputClasses, const uint numBBoxes,
const uint64_t lastInputSize, const float scaleXY, const float* anchors, const int* mask)
{
uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
uint y_id = blockIdx.y * blockDim.y + threadIdx.y;
uint z_id = blockIdx.z * blockDim.z + threadIdx.z;
// The grid is rounded up by the launcher, so trailing threads must exit.
if (x_id >= gridSizeX || y_id >= gridSizeY || z_id >= numBBoxes)
return;
const int numGridCells = gridSizeX * gridSizeY;
const int bbindex = y_id * gridSizeX + x_id; // row-major cell index, stride gridSizeX
// scale_x_y trick: xy = sigmoid(t) * alpha + beta re-centers the prediction.
const float alpha = scaleXY;
const float beta = -0.5 * (scaleXY - 1);
float xc = (sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) * alpha + beta + x_id)
* netWidth / gridSizeX;
float yc = (sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]) * alpha + beta + y_id)
* netHeight / gridSizeY;
float w = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]) * anchors[mask[z_id] * 2];
float h = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]) * anchors[mask[z_id] * 2 + 1];
const float objectness = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]);
// Pick the class with the highest sigmoid probability.
float maxProb = 0.0f;
int maxIndex = -1;
for (uint i = 0; i < numOutputClasses; ++i) {
float prob = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]);
if (prob > maxProb) {
maxProb = prob;
maxIndex = i;
}
}
// Fix: the output row stride must be gridSizeX (it was gridSizeY), matching
// bbindex above; the old stride corrupted the layout for non-square grids.
int count = z_id * gridSizeX * gridSizeY + y_id * gridSizeX + x_id + lastInputSize;
boxes[count * 4 + 0] = xc;
boxes[count * 4 + 1] = yc;
boxes[count * 4 + 2] = w;
boxes[count * 4 + 3] = h;
scores[count] = maxProb * objectness;
classes[count] = (float) maxIndex;
}
// Host launcher for gpuYoloLayer: decodes one YOLO head for every image in the
// batch, offsetting the input/output pointers to each element's slice.
// Asynchronous on 'stream'; returns the last error (catches launch failures).
hipError_t cudaYoloLayer(const void* input, void* boxes, void* scores, void* classes, const uint& batchSize,
const uint64_t& inputSize, const uint64_t& outputSize, const uint64_t& lastInputSize, const uint& netWidth,
const uint& netHeight, const uint& gridSizeX, const uint& gridSizeY, const uint& numOutputClasses, const uint& numBBoxes,
const float& scaleXY, const void* anchors, const void* mask, hipStream_t stream);
hipError_t cudaYoloLayer(const void* input, void* boxes, void* scores, void* classes, const uint& batchSize,
const uint64_t& inputSize, const uint64_t& outputSize, const uint64_t& lastInputSize, const uint& netWidth,
const uint& netHeight, const uint& gridSizeX, const uint& gridSizeY, const uint& numOutputClasses, const uint& numBBoxes,
const float& scaleXY, const void* anchors, const void* mask, hipStream_t stream)
{
dim3 threads_per_block(16, 16, 4);
// +1 block per axis rounds up when the grid size is not a multiple of the
// block; the kernel bounds-checks the overshoot.
dim3 number_of_blocks((gridSizeX / threads_per_block.x) + 1, (gridSizeY / threads_per_block.y) + 1,
(numBBoxes / threads_per_block.z) + 1);
for (unsigned int batch = 0; batch < batchSize; ++batch) {
hipLaunchKernelGGL(( gpuYoloLayer), dim3(number_of_blocks), dim3(threads_per_block), 0, stream,
reinterpret_cast<const float*> (input) + (batch * inputSize),
reinterpret_cast<float*> (boxes) + (batch * 4 * outputSize),
reinterpret_cast<float*> (scores) + (batch * 1 * outputSize),
reinterpret_cast<float*> (classes) + (batch * 1 * outputSize),
netWidth, netHeight, gridSizeX, gridSizeY, numOutputClasses, numBBoxes, lastInputSize, scaleXY,
reinterpret_cast<const float*> (anchors), reinterpret_cast<const int*> (mask));
}
return hipGetLastError();
}
| 067a156399fbb3c6d5510962cf21320f91233741.cu | /*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include <stdint.h>
inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); }
/*
 * Decode one YOLO detection head: for each grid cell (x_id, y_id) and anchor
 * z_id, convert the raw network output into a box (center x/y in input-image
 * pixels, w/h scaled by the masked anchor), an objectness-weighted best-class
 * score and the class index.
 * Launch layout: 3-D grid of 3-D blocks covering (gridSizeX, gridSizeY, numBBoxes).
 * Input layout per anchor is planar: [anchor][5 + numOutputClasses][cell].
 */
__global__ void gpuYoloLayer(const float* input, float* boxes, float* scores, float* classes, const uint netWidth,
const uint netHeight, const uint gridSizeX, const uint gridSizeY, const uint numOutputClasses, const uint numBBoxes,
const uint64_t lastInputSize, const float scaleXY, const float* anchors, const int* mask)
{
uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
uint y_id = blockIdx.y * blockDim.y + threadIdx.y;
uint z_id = blockIdx.z * blockDim.z + threadIdx.z;
// The grid is rounded up by the launcher, so trailing threads must exit.
if (x_id >= gridSizeX || y_id >= gridSizeY || z_id >= numBBoxes)
return;
const int numGridCells = gridSizeX * gridSizeY;
const int bbindex = y_id * gridSizeX + x_id; // row-major cell index, stride gridSizeX
// scale_x_y trick: xy = sigmoid(t) * alpha + beta re-centers the prediction.
const float alpha = scaleXY;
const float beta = -0.5 * (scaleXY - 1);
float xc = (sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) * alpha + beta + x_id)
* netWidth / gridSizeX;
float yc = (sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]) * alpha + beta + y_id)
* netHeight / gridSizeY;
float w = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]) * anchors[mask[z_id] * 2];
float h = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]) * anchors[mask[z_id] * 2 + 1];
const float objectness = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]);
// Pick the class with the highest sigmoid probability.
float maxProb = 0.0f;
int maxIndex = -1;
for (uint i = 0; i < numOutputClasses; ++i) {
float prob = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]);
if (prob > maxProb) {
maxProb = prob;
maxIndex = i;
}
}
// Fix: the output row stride must be gridSizeX (it was gridSizeY), matching
// bbindex above; the old stride corrupted the layout for non-square grids.
int count = z_id * gridSizeX * gridSizeY + y_id * gridSizeX + x_id + lastInputSize;
boxes[count * 4 + 0] = xc;
boxes[count * 4 + 1] = yc;
boxes[count * 4 + 2] = w;
boxes[count * 4 + 3] = h;
scores[count] = maxProb * objectness;
classes[count] = (float) maxIndex;
}
cudaError_t cudaYoloLayer(const void* input, void* boxes, void* scores, void* classes, const uint& batchSize,
const uint64_t& inputSize, const uint64_t& outputSize, const uint64_t& lastInputSize, const uint& netWidth,
const uint& netHeight, const uint& gridSizeX, const uint& gridSizeY, const uint& numOutputClasses, const uint& numBBoxes,
const float& scaleXY, const void* anchors, const void* mask, cudaStream_t stream);
cudaError_t cudaYoloLayer(const void* input, void* boxes, void* scores, void* classes, const uint& batchSize,
const uint64_t& inputSize, const uint64_t& outputSize, const uint64_t& lastInputSize, const uint& netWidth,
const uint& netHeight, const uint& gridSizeX, const uint& gridSizeY, const uint& numOutputClasses, const uint& numBBoxes,
const float& scaleXY, const void* anchors, const void* mask, cudaStream_t stream)
{
dim3 threads_per_block(16, 16, 4);
dim3 number_of_blocks((gridSizeX / threads_per_block.x) + 1, (gridSizeY / threads_per_block.y) + 1,
(numBBoxes / threads_per_block.z) + 1);
for (unsigned int batch = 0; batch < batchSize; ++batch) {
gpuYoloLayer<<<number_of_blocks, threads_per_block, 0, stream>>>(
reinterpret_cast<const float*> (input) + (batch * inputSize),
reinterpret_cast<float*> (boxes) + (batch * 4 * outputSize),
reinterpret_cast<float*> (scores) + (batch * 1 * outputSize),
reinterpret_cast<float*> (classes) + (batch * 1 * outputSize),
netWidth, netHeight, gridSizeX, gridSizeY, numOutputClasses, numBBoxes, lastInputSize, scaleXY,
reinterpret_cast<const float*> (anchors), reinterpret_cast<const int*> (mask));
}
return cudaGetLastError();
}
|
1fcb61d650096bc1dfeabc9fa16d05fe6f40d468.hip | // !!! This is a file automatically generated by hipify!!!
/*
Author : Kapil Agarwal
Date : 22 June 2012
Compile : make dgemm_mpi
Help : mpirun -n <no of processes> -host <host ip> ./dgemm_mpi -help
*/
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<mpi.h>
#include<string.h>
#include<sys/time.h>
#include<ctype.h>
#include<math.h>
#include "rocblas.h"
#define ERROR 1.0e-12
// Shut down MPI and terminate the process; used as the fatal-error exit path
// by all the safe_call/mem_error helpers below.
void call_finalize()
{
MPI_Finalize();
exit(-1);
}
// Abort the whole MPI job if a CUDA runtime call failed.
// 'line' is expected to be __LINE__ at the call site.
void safe_call(hipError_t ret, int myrank, int line)
{
if(ret!=hipSuccess)
{
printf("Error on Process %d at line %d : %s\n",myrank,line,hipGetErrorString(ret));
call_finalize();
}
}
// Report a failed host allocation (array name, element count and type) for
// the given MPI rank, then abort the whole job.
void mem_error(char *arrayname, int len, char *type, int myrank)
{
printf("\nMemory not sufficient to allocate for array %s\n\tProcess : %d \n\tMemory requested = %d number of %s elements\n",arrayname, myrank, len, type);
call_finalize();
}
/*
 * Fill arr[0..len-1] with pseudo-random doubles in [0, 1).
 * Fix: the original called srand(time(NULL)) inside the loop -- srand() seeds
 * rand(), not drand48(), and reseeding every iteration is pointless anyway.
 * Seed the drand48 generator once, before the loop.
 */
void fill_data(double *arr, int len)
{
int i;
srand48(time(NULL));
for(i=0;i<len;i++)
{
arr[i] = drand48();
}
}
/*
 * Scan argv for an option of the form "-<cmp>=<value>".
 * Returns 1 and copies <value> into ret on a match; for cmp == "help" any
 * "-...help..." argument returns 1 without touching ret. Returns 0 otherwise.
 * Fix: the dash-counter 'i' was initialized once outside the argument loop,
 * so after the first dashed argument a later argument WITHOUT a leading dash
 * was accidentally accepted. Reset it for every argument.
 */
int get_cmd_arg(int argc, char **arg, char *cmp, char *ret)
{
int j;
char *pch;
for(j=0;j<argc;j++)
{
int i = 0; // number of leading '-' characters on this argument
while(arg[j][i]=='-') { i++; }
if(i!=0)
{
if((pch=strstr(arg[j],cmp)))
{
if(strcmp(cmp,"help") == 0)
return 1;
else if((pch=strpbrk(arg[j],"=")))
{
strcpy(ret,pch+1); // copy everything after the first '='
return 1;
}
}
}
}
return 0;
}
/*
 * Element-wise comparison of two row x col matrices with tolerance ERROR.
 * Returns the flat index of the first mismatching element, or -1 if the
 * matrices agree everywhere.
 */
int check(double *a, double *b, int row, int col)
{
int total = row * col;
for (int idx = 0; idx < total; idx++)
{
if (fabs(a[idx] - b[idx]) > ERROR)
return idx;
}
return -1;
}
// Print the command-line usage text (shown for -help or malformed arguments).
void printSyntax()
{
printf("Syntax : \n\
mpirun -n <no of processes> -host <host ip> ./dgemm_mpi -options\n\
\n\
-help\n\
-mode=MODE square(default),general\n\
if mode=square\n\
\t-rowA=no of rows in Square matrices\n\
if mode=general\n\
\t-rowA=no of rows in A\n\
\t-colB=no of cols in B\n\
\t-colA=no of cols in A\n");
}
/* Return 1 if every character of str is a decimal digit (the empty string
 * counts as valid), 0 otherwise. */
int isint(char *str)
{
int n = strlen(str);
for (int k = 0; k < n; k++)
{
if (!isdigit(str[k]))
return 0;
}
return 1;
}
/*
 * Abort the whole MPI job if a CUBLAS call failed.
 * Fix: the original format string contained a %s conversion with no matching
 * argument (undefined behavior). CUBLAS has no strerror-style helper here,
 * so print the numeric status code instead.
 */
void safe_call_cublas(hipblasStatus_t ret, int myrank, int line)
{
if(ret!=HIPBLAS_STATUS_SUCCESS)
{
printf("CUBLAS Error on Process %d at line %d : status %d\n",myrank,line,(int)ret);
call_finalize();
}
}
/*
 * Reference (host) matrix product: c = a * b, all matrices row-major.
 * a is rowA x colA, b is colA x colB, c is rowA x colB.
 * Used to validate the GPU result.
 */
void cpu_matmatmul(double *a, double *b, double *c, int rowA, int colB, int colA)
{
for (int r = 0; r < rowA; r++)
{
for (int col = 0; col < colB; col++)
{
double acc = 0.0;
for (int k = 0; k < colA; k++)
acc += a[r * colA + k] * b[k * colB + col];
c[r * colB + col] = acc;
}
}
}
/*
 * Write the transpose of the row-major row x col matrix a into b
 * (b becomes col x row, also row-major). Used to convert between the host's
 * row-major layout and CUBLAS's column-major layout.
 */
void transpose(double *a, double *b, int row, int col)
{
for (int r = 0; r < row; r++)
for (int c = 0; c < col; c++)
b[c * row + r] = a[r * col + c];
}
// Debug helper: print a row x col matrix (row-major) to stdout, one row per
// line, followed by a blank line.
void printMat(double *mat, int row, int col)
{
int i,j;
for(i=0;i<row;i++)
{
for(j=0;j<col;j++)
printf("%f ",mat[i*col+j]);
printf("\n");
}
printf("\n");
}
int main(int argc, char *argv[])
{
int comm_size, myrank, i, j, no_of_args, valid_args, materr;
int RowA, ColB, ColA;
char MODE[10], temp_arg[80];
char myname[MPI_MAX_PROCESSOR_NAME];
int namelen, devcount, device;
char devname[256];
hipDeviceProp_t devprop;
double *h_A, *h_B, *h_C, *cpu_C;
double *h_At, *h_Bt, *h_Ct;
double *d_A, *d_B, *d_C;
hipEvent_t start, stop;
double time;
float diff, gflops;
double alpha=1.0, beta=0.0;
float *sendbuf, *recvbuf;
int sendcnt, *recvcnts, *displs;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&comm_size);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
// Default
strcpy(MODE,"square");
RowA = 16;
ColB = 16;
ColA = 16;
no_of_args = argc;
if(get_cmd_arg(argc,argv,"help",temp_arg) == 1)
{
no_of_args--;
if(myrank==0)
printSyntax();
call_finalize();
}
if(get_cmd_arg(argc,argv,"mode",temp_arg) == 1)
{
no_of_args--;
strcpy(MODE,temp_arg);
if(no_of_args==4 && strcmp(MODE,"general")==0)
{
valid_args = 1;
if(get_cmd_arg(argc,argv,"rowA",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
RowA = atoi(temp_arg);
else
valid_args=0;
}
if(get_cmd_arg(argc,argv,"colB",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
ColB = atoi(temp_arg);
else
valid_args=0;
}
if(get_cmd_arg(argc,argv,"colA",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
ColA = atoi(temp_arg);
else
valid_args=0;
}
}
if(valid_args == 0)
{
if(myrank==0)
printf("Enter valid values for number of rows and columns of the matrices.\n");
call_finalize();
}
}
if(strcmp(MODE,"square")==0)
{
if(get_cmd_arg(argc,argv,"rowA",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
{
RowA = atoi(temp_arg);
ColB = RowA;
ColA = RowA;
}
else
{
if(myrank==0)
printf("Enter valid values for number of rows and columns of the matrices.\n");
call_finalize();
}
}
}
if(no_of_args != 1)
{
if(myrank==0)
printSyntax();
call_finalize();
}
if(myrank == 0)
printf("MODE=%s RowA=%d ColB=%d ColA=%d\n",MODE,RowA,ColB,ColA);
MPI_Get_processor_name(myname, &namelen);
myname[namelen++] = (char)0;
safe_call(hipGetDeviceCount(&devcount),myrank,__LINE__);
sendcnt = (int)ceil((1.0*devcount-myrank)/comm_size);
sendbuf = (float *) malloc(sendcnt*sizeof(float));
recvbuf = (float *) malloc(devcount*sizeof(float));
recvcnts = (int *) malloc(comm_size*sizeof(int));
displs = (int *) malloc(comm_size*sizeof(int));
i=0,j=0;
if(devcount%comm_size)
for(;i<(devcount%comm_size);i++)
{
recvcnts[i] = (devcount/comm_size)+1;
displs[i] = j*sizeof(float);
j += recvcnts[i];
}
for(;i<comm_size;i++)
{
recvcnts[i] = devcount/comm_size;
displs[i] = j*sizeof(float);
j += recvcnts[i];
}
if(devcount > 0)
{
if(strcmp(MODE,"square") == 0 || strcmp(MODE,"general") == 0)
{
j=0;
for(i = myrank; i < devcount; i+=comm_size)
{
safe_call(hipSetDevice(i),myrank,__LINE__);
safe_call(hipGetDevice(&device),myrank,__LINE__);
if(device == i)
{
safe_call(hipGetDeviceProperties(&devprop,device),myrank,__LINE__);
strcpy(devname,devprop.name);
h_A =(double *)malloc(RowA*ColA*sizeof(double));
h_B = (double *)malloc(ColA*ColB*sizeof(double));
h_C = (double *)malloc(RowA*ColB*sizeof(double));
h_At =(double *)malloc(RowA*ColA*sizeof(double));
h_Bt = (double *)malloc(ColA*ColB*sizeof(double));
h_Ct = (double *)malloc(RowA*ColB*sizeof(double));
if(h_A==NULL)
mem_error("h_A",RowA*ColA,"double",myrank);
if(h_B==NULL)
mem_error("h_B",ColA*ColB,"double",myrank);
if(h_C==NULL)
mem_error("h_C",RowA*ColB,"double",myrank);
if(h_At==NULL)
mem_error("h_At",RowA*ColA,"double",myrank);
if(h_Bt==NULL)
mem_error("h_Bt",ColA*ColB,"double",myrank);
if(h_Ct==NULL)
mem_error("h_Ct",RowA*ColB,"double",myrank);
fill_data(h_A,RowA*ColA);
fill_data(h_B,ColB*ColA);
transpose(h_A,h_At,RowA,ColA);
transpose(h_B,h_Bt,ColA,ColB);
safe_call(hipEventCreate(&start),myrank,__LINE__);
safe_call(hipEventCreate(&stop),myrank,__LINE__);
safe_call_cublas(hipblasAlloc (RowA*ColA, sizeof(double), (void**)&d_A), myrank, __LINE__);
safe_call_cublas(hipblasAlloc (ColA*ColB, sizeof(double), (void**)&d_B), myrank, __LINE__);
safe_call_cublas(hipblasAlloc (RowA*ColB, sizeof(double), (void**)&d_C), myrank, __LINE__);
safe_call_cublas(hipblasSetVector (RowA*ColA, sizeof(double), h_At, 1, d_A, 1), myrank, __LINE__);
safe_call_cublas(hipblasSetVector (ColA*ColB, sizeof(double), h_Bt, 1, d_B, 1), myrank, __LINE__);
safe_call(hipEventRecord(start, 0), myrank, __LINE__);
hipblasDgemm('N','N',RowA,ColB,ColA,alpha,d_A,RowA,d_B,ColA,beta,d_C,RowA);
safe_call(hipEventRecord (stop, 0), myrank, __LINE__);
safe_call(hipEventSynchronize (stop), myrank, __LINE__);
safe_call_cublas(hipblasGetVector (RowA*ColB, sizeof(double), d_C, 1, h_Ct, 1), myrank, __LINE__);
safe_call(hipEventElapsedTime(&diff, start, stop), myrank, __LINE__);
time = diff *1.0e-3;
gflops=(1.0e-9 * (( 2.0 * RowA * ColB * ColA )/time));
cpu_C = (double *)malloc(RowA*ColB*sizeof(double));
if(cpu_C==NULL)
mem_error("cpu_C",RowA*ColB,"double",myrank);
cpu_matmatmul(h_A, h_B, cpu_C, RowA, ColB, ColA);
transpose(h_Ct,h_C,ColB,RowA);
if(materr=check(h_C,cpu_C,RowA,ColB)==-1)
sendbuf[j++] = gflops;
else
sendbuf[j++] = -1;
safe_call_cublas(hipblasFree(d_A),myrank,__LINE__);
safe_call_cublas(hipblasFree(d_B),myrank,__LINE__);
safe_call_cublas(hipblasFree(d_C),myrank,__LINE__);
free(h_A);
free(h_B);
free(h_C);
free(cpu_C);
free(h_At);
free(h_Bt);
free(h_Ct);
safe_call(hipEventDestroy(start),myrank,__LINE__);
safe_call(hipEventDestroy(stop),myrank,__LINE__);
}
}
MPI_Gatherv(sendbuf, sendcnt, MPI_FLOAT, recvbuf, recvcnts, displs, MPI_FLOAT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
if(myrank == 0)
for(i=0;i<devcount;i++)
if(recvbuf[i]!=-1)
printf("\n\
Device %d\n\
Mode : %s\n\
Dimensions of Matrix : \n\
\t rowA : %d\n\
\t colB : %d\n\
\t colA : %d\n\
Gflops\/s : %f\n",\
i,MODE,RowA,ColB,ColA,recvbuf[i]);
else
printf("Error : CPU and GPU result do not match on Device:%d\n",device);
}
else
{
if(myrank==0)
printf("Matrix mode choices : square/general\n");
}
}
else
{
if(myrank == 0)
printf("No devices found.\n");
}
MPI_Finalize();
return 0;
}
| 1fcb61d650096bc1dfeabc9fa16d05fe6f40d468.cu | /*
Author : Kapil Agarwal
Date : 22 June 2012
Compile : make dgemm_mpi
Help : mpirun -n <no of processes> -host <host ip> ./dgemm_mpi -help
*/
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<mpi.h>
#include<string.h>
#include<sys/time.h>
#include<ctype.h>
#include<math.h>
#include "cublas.h"
#define ERROR 1.0e-12
void call_finalize()
{
MPI_Finalize();
exit(-1);
}
void safe_call(cudaError_t ret, int myrank, int line)
{
if(ret!=cudaSuccess)
{
printf("Error on Process %d at line %d : %s\n",myrank,line,cudaGetErrorString(ret));
call_finalize();
}
}
void mem_error(char *arrayname, int len, char *type, int myrank)
{
printf("\nMemory not sufficient to allocate for array %s\n\tProcess : %d \n\tMemory requested = %d number of %s elements\n",arrayname, myrank, len, type);
call_finalize();
}
/*
 * Fill arr[0..len-1] with pseudo-random doubles in [0, 1).
 * Fix: the original called srand(time(NULL)) inside the loop -- srand() seeds
 * rand(), not drand48(), and reseeding every iteration is pointless anyway.
 * Seed the drand48 generator once, before the loop.
 */
void fill_data(double *arr, int len)
{
int i;
srand48(time(NULL));
for(i=0;i<len;i++)
{
arr[i] = drand48();
}
}
int get_cmd_arg(int argc, char **arg, char *cmp, char *ret)
{
int i,j;
char *pch;
i=0;
for(j=0;j<argc;j++)
{
while(arg[j][i]=='-') { i++; }
if(i!=0)
{
if(pch=strstr(arg[j],cmp))
{
if(strcmp(cmp,"help") == 0)
return 1;
else if(pch=strpbrk(arg[j],"="))
{
strcpy(ret,pch+1);
return 1;
}
}
}
}
return 0;
}
int check(double *a, double *b, int row, int col)
{
int i;
for(i=0;i<row*col;i++)
if(fabs(a[i]-b[i])>ERROR)
{
return i;
}
return -1;
}
void printSyntax()
{
printf("Syntax : \n\
mpirun -n <no of processes> -host <host ip> ./dgemm_mpi -options\n\
\n\
-help\n\
-mode=MODE square(default),general\n\
if mode=square\n\
\t-rowA=no of rows in Square matrices\n\
if mode=general\n\
\t-rowA=no of rows in A\n\
\t-colB=no of cols in B\n\
\t-colA=no of cols in A\n");
}
int isint(char *str)
{
int i,len;
len = strlen(str);
for(i=0;i<len;i++)
if(!isdigit(str[i]))
return 0;
return 1;
}
/*
 * Abort the whole MPI job if a CUBLAS call failed.
 * Fix: the original format string contained a %s conversion with no matching
 * argument (undefined behavior). CUBLAS has no strerror-style helper here,
 * so print the numeric status code instead.
 */
void safe_call_cublas(cublasStatus_t ret, int myrank, int line)
{
if(ret!=CUBLAS_STATUS_SUCCESS)
{
printf("CUBLAS Error on Process %d at line %d : status %d\n",myrank,line,(int)ret);
call_finalize();
}
}
void cpu_matmatmul(double *a, double *b, double *c, int rowA, int colB, int colA)
{
int i,j,k;
double result;
for(i=0;i<rowA;i++)
for(j=0;j<colB;j++)
{
result = 0.0;
for(k=0;k<colA;k++)
result += (a[i*colA+k] * b[k*colB+j]);
c[i*colB+j] = result;
}
}
void transpose(double *a, double *b, int row, int col)
{
int i,j;
for(i=0;i<row;i++)
for(j=0;j<col;j++)
b[j*row+i] = a[i*col+j];
}
void printMat(double *mat, int row, int col)
{
int i,j;
for(i=0;i<row;i++)
{
for(j=0;j<col;j++)
printf("%f ",mat[i*col+j]);
printf("\n");
}
printf("\n");
}
int main(int argc, char *argv[])
{
int comm_size, myrank, i, j, no_of_args, valid_args, materr;
int RowA, ColB, ColA;
char MODE[10], temp_arg[80];
char myname[MPI_MAX_PROCESSOR_NAME];
int namelen, devcount, device;
char devname[256];
cudaDeviceProp devprop;
double *h_A, *h_B, *h_C, *cpu_C;
double *h_At, *h_Bt, *h_Ct;
double *d_A, *d_B, *d_C;
cudaEvent_t start, stop;
double time;
float diff, gflops;
double alpha=1.0, beta=0.0;
float *sendbuf, *recvbuf;
int sendcnt, *recvcnts, *displs;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&comm_size);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
// Default
strcpy(MODE,"square");
RowA = 16;
ColB = 16;
ColA = 16;
no_of_args = argc;
if(get_cmd_arg(argc,argv,"help",temp_arg) == 1)
{
no_of_args--;
if(myrank==0)
printSyntax();
call_finalize();
}
if(get_cmd_arg(argc,argv,"mode",temp_arg) == 1)
{
no_of_args--;
strcpy(MODE,temp_arg);
if(no_of_args==4 && strcmp(MODE,"general")==0)
{
valid_args = 1;
if(get_cmd_arg(argc,argv,"rowA",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
RowA = atoi(temp_arg);
else
valid_args=0;
}
if(get_cmd_arg(argc,argv,"colB",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
ColB = atoi(temp_arg);
else
valid_args=0;
}
if(get_cmd_arg(argc,argv,"colA",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
ColA = atoi(temp_arg);
else
valid_args=0;
}
}
if(valid_args == 0)
{
if(myrank==0)
printf("Enter valid values for number of rows and columns of the matrices.\n");
call_finalize();
}
}
if(strcmp(MODE,"square")==0)
{
if(get_cmd_arg(argc,argv,"rowA",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
{
RowA = atoi(temp_arg);
ColB = RowA;
ColA = RowA;
}
else
{
if(myrank==0)
printf("Enter valid values for number of rows and columns of the matrices.\n");
call_finalize();
}
}
}
if(no_of_args != 1)
{
if(myrank==0)
printSyntax();
call_finalize();
}
if(myrank == 0)
printf("MODE=%s RowA=%d ColB=%d ColA=%d\n",MODE,RowA,ColB,ColA);
MPI_Get_processor_name(myname, &namelen);
myname[namelen++] = (char)0;
safe_call(cudaGetDeviceCount(&devcount),myrank,__LINE__);
sendcnt = (int)ceil((1.0*devcount-myrank)/comm_size);
sendbuf = (float *) malloc(sendcnt*sizeof(float));
recvbuf = (float *) malloc(devcount*sizeof(float));
recvcnts = (int *) malloc(comm_size*sizeof(int));
displs = (int *) malloc(comm_size*sizeof(int));
i=0,j=0;
if(devcount%comm_size)
for(;i<(devcount%comm_size);i++)
{
recvcnts[i] = (devcount/comm_size)+1;
displs[i] = j*sizeof(float);
j += recvcnts[i];
}
for(;i<comm_size;i++)
{
recvcnts[i] = devcount/comm_size;
displs[i] = j*sizeof(float);
j += recvcnts[i];
}
if(devcount > 0)
{
if(strcmp(MODE,"square") == 0 || strcmp(MODE,"general") == 0)
{
j=0;
for(i = myrank; i < devcount; i+=comm_size)
{
safe_call(cudaSetDevice(i),myrank,__LINE__);
safe_call(cudaGetDevice(&device),myrank,__LINE__);
if(device == i)
{
safe_call(cudaGetDeviceProperties(&devprop,device),myrank,__LINE__);
strcpy(devname,devprop.name);
h_A =(double *)malloc(RowA*ColA*sizeof(double));
h_B = (double *)malloc(ColA*ColB*sizeof(double));
h_C = (double *)malloc(RowA*ColB*sizeof(double));
h_At =(double *)malloc(RowA*ColA*sizeof(double));
h_Bt = (double *)malloc(ColA*ColB*sizeof(double));
h_Ct = (double *)malloc(RowA*ColB*sizeof(double));
if(h_A==NULL)
mem_error("h_A",RowA*ColA,"double",myrank);
if(h_B==NULL)
mem_error("h_B",ColA*ColB,"double",myrank);
if(h_C==NULL)
mem_error("h_C",RowA*ColB,"double",myrank);
if(h_At==NULL)
mem_error("h_At",RowA*ColA,"double",myrank);
if(h_Bt==NULL)
mem_error("h_Bt",ColA*ColB,"double",myrank);
if(h_Ct==NULL)
mem_error("h_Ct",RowA*ColB,"double",myrank);
fill_data(h_A,RowA*ColA);
fill_data(h_B,ColB*ColA);
transpose(h_A,h_At,RowA,ColA);
transpose(h_B,h_Bt,ColA,ColB);
safe_call(cudaEventCreate(&start),myrank,__LINE__);
safe_call(cudaEventCreate(&stop),myrank,__LINE__);
safe_call_cublas(cublasAlloc (RowA*ColA, sizeof(double), (void**)&d_A), myrank, __LINE__);
safe_call_cublas(cublasAlloc (ColA*ColB, sizeof(double), (void**)&d_B), myrank, __LINE__);
safe_call_cublas(cublasAlloc (RowA*ColB, sizeof(double), (void**)&d_C), myrank, __LINE__);
safe_call_cublas(cublasSetVector (RowA*ColA, sizeof(double), h_At, 1, d_A, 1), myrank, __LINE__);
safe_call_cublas(cublasSetVector (ColA*ColB, sizeof(double), h_Bt, 1, d_B, 1), myrank, __LINE__);
safe_call(cudaEventRecord(start, 0), myrank, __LINE__);
cublasDgemm('N','N',RowA,ColB,ColA,alpha,d_A,RowA,d_B,ColA,beta,d_C,RowA);
safe_call(cudaEventRecord (stop, 0), myrank, __LINE__);
safe_call(cudaEventSynchronize (stop), myrank, __LINE__);
safe_call_cublas(cublasGetVector (RowA*ColB, sizeof(double), d_C, 1, h_Ct, 1), myrank, __LINE__);
safe_call(cudaEventElapsedTime(&diff, start, stop), myrank, __LINE__);
time = diff *1.0e-3;
gflops=(1.0e-9 * (( 2.0 * RowA * ColB * ColA )/time));
cpu_C = (double *)malloc(RowA*ColB*sizeof(double));
if(cpu_C==NULL)
mem_error("cpu_C",RowA*ColB,"double",myrank);
cpu_matmatmul(h_A, h_B, cpu_C, RowA, ColB, ColA);
transpose(h_Ct,h_C,ColB,RowA);
if(materr=check(h_C,cpu_C,RowA,ColB)==-1)
sendbuf[j++] = gflops;
else
sendbuf[j++] = -1;
safe_call_cublas(cublasFree(d_A),myrank,__LINE__);
safe_call_cublas(cublasFree(d_B),myrank,__LINE__);
safe_call_cublas(cublasFree(d_C),myrank,__LINE__);
free(h_A);
free(h_B);
free(h_C);
free(cpu_C);
free(h_At);
free(h_Bt);
free(h_Ct);
safe_call(cudaEventDestroy(start),myrank,__LINE__);
safe_call(cudaEventDestroy(stop),myrank,__LINE__);
}
}
MPI_Gatherv(sendbuf, sendcnt, MPI_FLOAT, recvbuf, recvcnts, displs, MPI_FLOAT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
if(myrank == 0)
for(i=0;i<devcount;i++)
if(recvbuf[i]!=-1)
printf("\n\
Device %d\n\
Mode : %s\n\
Dimensions of Matrix : \n\
\t rowA : %d\n\
\t colB : %d\n\
\t colA : %d\n\
Gflops\/s : %f\n",\
i,MODE,RowA,ColB,ColA,recvbuf[i]);
else
printf("Error : CPU and GPU result do not match on Device:%d\n",device);
}
else
{
if(myrank==0)
printf("Matrix mode choices : square/general\n");
}
}
else
{
if(myrank == 0)
printf("No devices found.\n");
}
MPI_Finalize();
return 0;
}
|
7091b1f8ba09321222d51af2220b33447f333ec2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
// useful defines
#define Mask_width 5
#define Mask_radius Mask_width / 2
#define TILE_WIDTH 16
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0.0), 1.0))
// Tiled 2D convolution over a channel-interleaved image.
// I: input image, (height x width x channels) interleaved.
// M: Mask_width x Mask_width filter, row-major (__restrict__ enables the
//    read-only data cache).
// P: output image, same layout as I; each value clamped to [0,1].
// Expected launch: TILE_WIDTH x TILE_WIDTH threads per block, grid covering
// ceil(width/TILE_WIDTH) x ceil(height/TILE_WIDTH) output tiles.
__global__ void convolution(float *I, const float *__restrict__ M, float *P,
int channels, int width, int height) {
// Shared halo tile: output tile plus a Mask_radius border on every side.
__shared__ float N_ds[w][w];
int k;
for (k = 0; k < channels; k++) {
// First batch loading: the TILE_WIDTH^2 threads cooperatively fill the
// first TILE_WIDTH^2 slots of the w x w halo; out-of-image samples are
// zero-padded.
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x;
int destY = dest / w;
int destX = dest % w;
int srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
int srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
int src = (srcY * width + srcX) * channels + k;
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) {
N_ds[destY][destX] = I[src];
} else {
N_ds[destY][destX] = 0;
}
// Second batch loading: the same threads load the remaining
// w*w - TILE_WIDTH^2 slots; destY < w rejects the overshoot.
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
destY = dest / w;
destX = dest % w;
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
src = (srcY * width + srcX) * channels + k;
if (destY < w) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) {
N_ds[destY][destX] = I[src];
} else {
N_ds[destY][destX] = 0;
}
}
__syncthreads(); // halo tile fully populated before anyone reads it
float accum = 0;
int y, x;
for (y = 0; y < Mask_width; y++) {
for (x = 0; x < Mask_width; x++) {
accum += N_ds[threadIdx.y + y][threadIdx.x + x]
* M[y * Mask_width + x];
}
}
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if (y < height && x < width)
P[(y * width + x) * channels + k] = clamp(accum);
__syncthreads(); // tile is reused for the next channel: separate the passes
}
}
} | 7091b1f8ba09321222d51af2220b33447f333ec2.cu | extern "C" {
// useful defines
#define Mask_width 5
#define Mask_radius Mask_width / 2
#define TILE_WIDTH 16
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0.0), 1.0))
// Tiled 2D convolution over a channel-interleaved image.
// I: input image, (height x width x channels) interleaved.
// M: Mask_width x Mask_width filter, row-major (__restrict__ enables the
//    read-only data cache).
// P: output image, same layout as I; each value clamped to [0,1].
// Expected launch: TILE_WIDTH x TILE_WIDTH threads per block, grid covering
// ceil(width/TILE_WIDTH) x ceil(height/TILE_WIDTH) output tiles.
__global__ void convolution(float *I, const float *__restrict__ M, float *P,
int channels, int width, int height) {
// Shared halo tile: output tile plus a Mask_radius border on every side.
__shared__ float N_ds[w][w];
int k;
for (k = 0; k < channels; k++) {
// First batch loading: the TILE_WIDTH^2 threads cooperatively fill the
// first TILE_WIDTH^2 slots of the w x w halo; out-of-image samples are
// zero-padded.
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x;
int destY = dest / w;
int destX = dest % w;
int srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
int srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
int src = (srcY * width + srcX) * channels + k;
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) {
N_ds[destY][destX] = I[src];
} else {
N_ds[destY][destX] = 0;
}
// Second batch loading: the same threads load the remaining
// w*w - TILE_WIDTH^2 slots; destY < w rejects the overshoot.
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
destY = dest / w;
destX = dest % w;
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
src = (srcY * width + srcX) * channels + k;
if (destY < w) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) {
N_ds[destY][destX] = I[src];
} else {
N_ds[destY][destX] = 0;
}
}
__syncthreads(); // halo tile fully populated before anyone reads it
float accum = 0;
int y, x;
for (y = 0; y < Mask_width; y++) {
for (x = 0; x < Mask_width; x++) {
accum += N_ds[threadIdx.y + y][threadIdx.x + x]
* M[y * Mask_width + x];
}
}
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if (y < height && x < width)
P[(y * width + x) * channels + k] = clamp(accum);
__syncthreads(); // tile is reused for the next channel: separate the passes
}
}
} |
9b2d1a8a5c87acd71859093042c637ef000341ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <hip/driver_types.h>
#define BLOCK_SIZE 32
void matrixMul(float *,float *,float *,int,int);
__global__ void matrixMulKernel(float *,float *,float *,int);
// Entry point: reads <nrow> <ncol> from the command line (they must be
// equal), builds two matrices with A[i][j] = B[i][j] = i, multiplies them
// on the GPU via matrixMul() and prints the product.
int main(int argc, char * argv[]){
int i,j,nrow,ncol;
float *A, *B, *C;
hipSetDevice(1);
/* Validate the argument count BEFORE dereferencing argv[1]/argv[2];
   the previous order read missing arguments first (undefined behavior). */
if(argc != 3){
printf("Usage: %s <nrow> <ncol> (nrow must equal ncol)\n", argv[0]);
exit(EXIT_FAILURE);
}
nrow = atoi(argv[1]);
ncol = atoi(argv[2]);
if(nrow != ncol || nrow <= 0){
printf("Number of rows should be equal to number of columns\n");
exit(EXIT_FAILURE);
}
size_t size = (size_t)nrow * ncol * sizeof(float);
A = (float *)malloc(size);
B = (float *)malloc(size);
C = (float *)malloc(size);
if(A == NULL || B == NULL || C == NULL){ /* previously unchecked */
printf("Host allocation failed\n");
exit(EXIT_FAILURE);
}
printf("size = %d x %d\n", nrow, ncol);
srand(time(NULL));
for(i=0;i<nrow;i++)
{
for(j=0;j<ncol;j++)
{
A[i*ncol+j] = (float)i;
B[i*ncol+j] = (float)i;
}
}
matrixMul(A,B,C,nrow,ncol);
FILE *output = fopen("matrix_output.txt", "w");
if(output == NULL){
printf("A file wasn't created or located\n");
exit(EXIT_FAILURE);
}
for(i=0;i<nrow;i++)
{
for(j=0;j<ncol;j++)
{
printf("%1.1f ", C[i*ncol+j]);
}
}
fclose(output); /* stream was previously leaked */
printf("End\n");
free(A);
free(B);
free(C);
return 0;
}
// Host wrapper: uploads h_A/h_B, launches the naive matmul kernel with
// BLOCK_SIZE x BLOCK_SIZE thread blocks, and downloads the product into
// h_C. The kernel indexes with the single dimension ncol, so square
// matrices are assumed (main() enforces nrow == ncol).
void matrixMul(float * h_A,float * h_B, float * h_C, int nrow,int ncol){
int size = nrow * ncol * sizeof(float);
float *d_A, *d_B, *d_C;
// NOTE(review): hipMalloc results are checked below, but the memcpys and
// the kernel launch are not -- consider checking those too.
hipError_t error = hipMalloc((void **)&d_A, size);
if(error != hipSuccess){
printf("%s in %s at line %d \n", hipGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
hipMemcpy(d_A,h_A,size,hipMemcpyHostToDevice);
error = hipMalloc((void **)&d_B, size);
if(error != hipSuccess){
printf("%s in %s at line %d \n", hipGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
hipMemcpy(d_B,h_B,size,hipMemcpyHostToDevice);
error = hipMalloc((void **)&d_C, size);
if(error != hipSuccess){
printf("%s in %s at line %d \n", hipGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
//run kernel function with 32 threads for each block
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nrow + BLOCK_SIZE - 1) / BLOCK_SIZE, (ncol + BLOCK_SIZE - 1) / BLOCK_SIZE);
//dim3 grid((d_C.width+block.x - 1) / block.x, (d_C.height+block.y - 1) / block.y);
hipLaunchKernelGGL(( matrixMulKernel), dim3(grid), dim3(block), 0, 0, d_A,d_B,d_C,ncol);
// blocking copy also synchronizes with the kernel before reading d_C
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// Naive square matrix multiply: one thread per output element C[row][col],
// n = matrix dimension. Reads a row of A and a column of B straight from
// global memory (no shared-memory tiling).
__global__
void matrixMulKernel(float * A,float * B, float * C, int n){
unsigned int i; // NOTE(review): unsigned loop index compared against int n
float product = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < n && col < n){ // guard: grid may over-cover the matrix
for (i = 0; i < n; i++)
product += A[row * n + i] * B[i * n + col];
C[row*n + col] = (float)product;
}
}
| 9b2d1a8a5c87acd71859093042c637ef000341ce.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <driver_types.h>
#define BLOCK_SIZE 32
void matrixMul(float *,float *,float *,int,int);
__global__ void matrixMulKernel(float *,float *,float *,int);
// Entry point: reads <nrow> <ncol> from the command line (they must be
// equal), builds two matrices with A[i][j] = B[i][j] = i, multiplies them
// on the GPU via matrixMul() and prints the product.
int main(int argc, char * argv[]){
int i,j,nrow,ncol;
float *A, *B, *C;
cudaSetDevice(1);
/* Validate the argument count BEFORE dereferencing argv[1]/argv[2];
   the previous order read missing arguments first (undefined behavior). */
if(argc != 3){
printf("Usage: %s <nrow> <ncol> (nrow must equal ncol)\n", argv[0]);
exit(EXIT_FAILURE);
}
nrow = atoi(argv[1]);
ncol = atoi(argv[2]);
if(nrow != ncol || nrow <= 0){
printf("Number of rows should be equal to number of columns\n");
exit(EXIT_FAILURE);
}
size_t size = (size_t)nrow * ncol * sizeof(float);
A = (float *)malloc(size);
B = (float *)malloc(size);
C = (float *)malloc(size);
if(A == NULL || B == NULL || C == NULL){ /* previously unchecked */
printf("Host allocation failed\n");
exit(EXIT_FAILURE);
}
printf("size = %d x %d\n", nrow, ncol);
srand(time(NULL));
for(i=0;i<nrow;i++)
{
for(j=0;j<ncol;j++)
{
A[i*ncol+j] = (float)i;
B[i*ncol+j] = (float)i;
}
}
matrixMul(A,B,C,nrow,ncol);
FILE *output = fopen("matrix_output.txt", "w");
if(output == NULL){
printf("A file wasn't created or located\n");
exit(EXIT_FAILURE);
}
for(i=0;i<nrow;i++)
{
for(j=0;j<ncol;j++)
{
printf("%1.1f ", C[i*ncol+j]);
}
}
fclose(output); /* stream was previously leaked */
printf("End\n");
free(A);
free(B);
free(C);
return 0;
}
// Host wrapper: uploads h_A/h_B, launches the naive matmul kernel with
// BLOCK_SIZE x BLOCK_SIZE thread blocks, and downloads the product into
// h_C. The kernel indexes with the single dimension ncol, so square
// matrices are assumed (main() enforces nrow == ncol).
void matrixMul(float * h_A,float * h_B, float * h_C, int nrow,int ncol){
int size = nrow * ncol * sizeof(float);
float *d_A, *d_B, *d_C;
// NOTE(review): cudaMalloc results are checked below, but the memcpys and
// the kernel launch are not -- consider checking those too.
cudaError_t error = cudaMalloc((void **)&d_A, size);
if(error != cudaSuccess){
printf("%s in %s at line %d \n", cudaGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
error = cudaMalloc((void **)&d_B, size);
if(error != cudaSuccess){
printf("%s in %s at line %d \n", cudaGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
error = cudaMalloc((void **)&d_C, size);
if(error != cudaSuccess){
printf("%s in %s at line %d \n", cudaGetErrorString(error), __FILE__ ,__LINE__);
exit(EXIT_FAILURE);
}
//run kernel function with 32 threads for each block
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nrow + BLOCK_SIZE - 1) / BLOCK_SIZE, (ncol + BLOCK_SIZE - 1) / BLOCK_SIZE);
//dim3 grid((d_C.width+block.x - 1) / block.x, (d_C.height+block.y - 1) / block.y);
matrixMulKernel<<<grid, block>>>(d_A,d_B,d_C,ncol);
// blocking copy also synchronizes with the kernel before reading d_C
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
// Naive square matrix multiply: one thread per output element C[row][col],
// n = matrix dimension. Reads a row of A and a column of B straight from
// global memory (no shared-memory tiling).
__global__
void matrixMulKernel(float * A,float * B, float * C, int n){
unsigned int i; // NOTE(review): unsigned loop index compared against int n
float product = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < n && col < n){ // guard: grid may over-cover the matrix
for (i = 0; i < n; i++)
product += A[row * n + i] * B[i * n + col];
C[row*n + col] = (float)product;
}
}
|
e37ebc98ddea1479605e33ade52fcd19e313e82a.hip | // !!! This is a file automatically generated by hipify!!!
/*
Use "REAL" as floating point type instead of double or float
Compile with optional flag:
-DDOUBLE to use double instead of float
requires also -arch sm_21
single precision code:
> nvcc template.cu fileutils.cpp stringutils.cpp graphicstools.cpp -lcufft -o demo_single
double precision code:
> nvcc template.cu fileutils.cpp stringutils.cpp graphicstools.cpp -lcufft -DDOUBLE -arch sm_21 -o demo_single
*/
#include <stdio.h>
#include <stdlib.h> /* for rand() */
#include <unistd.h> /* for getpid() */
#include <time.h> /* for time() */
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "fileutils.h"
#include "stringutils.h"
#include "graphicstools.h"
// ******************************************************
#define PI 3.1415926535897932384626433832795
#define TWOPI 6.28318530717958647692528676655901
// construct REAL "type," depending on desired precision
// set the maximum number of threads
#ifdef DOUBLE
#define REAL double
#define MAXT 256
#else
#define REAL float
#define MAXT 512
#endif
// Complex number in the selected REAL precision (float or double),
// cast to the matching hipfft complex type in G_FFT.
typedef struct {
REAL re;
REAL im;
} COMPLEX;
// ******************************************************
//calculate the k-index in order to determine the correct k-vector for a given x,y,z-index
#define k_INDEX(i,L) ((i)<=((L)/2)?(i):((i)-(L)))
// ******************************************************
//initialize a real GPU array with a constant
// N: element count; index formula supports a 2D grid of 1D blocks.
__global__ void G_setrealconst(int N,REAL *a,REAL val) {
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(idx<N) a[idx]=val; // guard: grid may over-cover N
};
//multiply two complex GPU arrays (A,B) and store result in A
// Elementwise complex product A[i] *= B[i]; B is left untouched.
__global__ void G_mulcarray(int N,COMPLEX *A,COMPLEX *B)
{
int i=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
REAL re,im,re2,im2;
if(i<N)
{
re=A[i].re;im=A[i].im;
re2=B[i].re;im2=B[i].im;
A[i].re=re*re2-im*im2; // real part of (re+i*im)*(re2+i*im2)
A[i].im=im*re2+re*im2; // imaginary part
}
};
// ******************************************************
//execute the FFT on the GPU, zin and zout can be the same array for "in-place" FFT (a little slower)
//set "fwd" to false for inverse FFT
// Dispatches to the Z2Z (double) or C2C (float) variant to match REAL.
// NOTE(review): the hipfft status is not checked here; callers rely on
// calling cerr() afterwards.
void G_FFT(COMPLEX *zin,COMPLEX *zout,hipfftHandle &fftPlan,bool fwd=true)
{
#ifdef DOUBLE
if(fwd) hipfftExecZ2Z(fftPlan,(hipfftDoubleComplex*) zin,(hipfftDoubleComplex*) zout,HIPFFT_FORWARD);
else hipfftExecZ2Z(fftPlan,(hipfftDoubleComplex*) zin,(hipfftDoubleComplex*) zout,HIPFFT_BACKWARD);
#else
if(fwd) hipfftExecC2C(fftPlan,(hipfftComplex*) zin,(hipfftComplex*) zout,HIPFFT_FORWARD);
else hipfftExecC2C(fftPlan,(hipfftComplex*) zin,(hipfftComplex*) zout,HIPFFT_BACKWARD);
#endif
};
// ******************************************************
//split a complex array in two real arrays containing amplitude^2 and phase
// amp2[i] = |A[i]|^2 (squared magnitude, no sqrt); phase via atan2(im,re).
__global__ void G_ampphase(int N,COMPLEX *A,REAL* amp2,REAL* phase)
{
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
REAL re,im;
if(idx<N)
{
re=A[idx].re;im=A[idx].im;
amp2[idx]=re*re+im*im;
phase[idx]=atan2(im,re);
}
};
//split a complex array in two real arrays containing real and imaginary parts
// Copies A[i].re -> re[i] and A[i].im -> im[i]; A is left untouched.
__global__ void G_splitreim(int N,COMPLEX *A,REAL* re,REAL* im)
{
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(idx<N)
{
im[idx]=A[idx].im;
re[idx]=A[idx].re;
}
};
// ******************************************************
//check for a CUDA error, use argument for identification
// Returns true (and prints a message tagged with s) if a CUDA error is
// pending; false otherwise.
bool cerr(const char *s="n/a")
{
hipError_t err=hipGetLastError(); // note: this also clears the sticky error
if(err==hipSuccess)
return false;
printf("CUDA error [%s]: %s\n",s,hipGetErrorString(err));
return true;
};
//some function initializing a 2D complex array
// Fills the Nx x Ny array f (row-major, i = column, j = row) with a
// time-dependent separable sine/cosine test pattern.
__global__ void G_function(int Nx,int Ny, COMPLEX *f,REAL t) {
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
int i,j;
if(idx<Nx*Ny) {
i=idx%Nx;
j=idx/Nx;
f[idx].re=sin(0.1*i+t)*cos(0.1*t*j);
f[idx].im=-sin(0.1*j+t)*cos(0.1*t*i);
}
};
//function to calculate the x-derivate in Fourier space
// Multiplies f(k_x,k_y) by i*k_x in place; assumes a square Nx x Nx grid.
// dkx as passed by main() already includes the 1/(Nx*Nx) inverse-FFT
// normalization factor.
__global__ void G_kernel(int Nx, COMPLEX *f,REAL dkx) {
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
int i,j,ki;
REAL x,y,k;
if(idx<Nx*Nx) {
i=idx%Nx;
j=idx/Nx;
//calculate the x-derivative in Fourier space
ki=k_INDEX(i,Nx); //the Fourier component
//kj=k_INDEX(j,Ny);
//multipy i*k_x to f(k_x,k_y)
k=dkx*ki;
// (re + i*im) * (i*k) = -k*im + i*(k*re)
y=k*f[idx].re;
x=-k*f[idx].im;
f[idx].re=x;
f[idx].im=y;
}
}
// ******************************************************
//outputs an NetPBM image based on a real array a, m is the minimum value of a and M the maximum, nx&ny the dimension of a and cgrad a color gradient
/* cgrad values
0: rainbow
1: rainbow 2
2: rainbow 3
3: dark rainbow
4: temperature
5: temperature 2
6: thermo
7: solar
8: sunset
9: neon
*/
// a: nx*ny values; [m,M] is mapped to [0,1] for the color lookup.
void writeBM_real(string fn,REAL *a,REAL m,REAL M,int nx,int ny,int cgrad)
{
int i,n;
dcolor dcol;
unsigned int col;
REAL val,dci=1.0/(M-m); // NOTE(review): M == m would divide by zero
unsigned int *rgb;
unsigned char *gray;
PXMfile *Ifile;
colorfunction *colors;
n=nx*ny;
colors= new colorfunction();
colors->selectgradient(cgrad);
rgb=new unsigned int[n];
gray=(unsigned char *) rgb; // gray view aliases the rgb buffer (only one is used)
for(i=0;i<n;i++)
{
val=(a[i]-m)*dci; // normalize a[i] to [0,1]
if(cgrad<1) {
col=(unsigned int) (256*val);if(col>255) col=255; // clamp to 8-bit gray
gray[i]=col;}
else {
dcol=colors->getgradientcolor(val);col=colors->get32bitcolor(dcol);
rgb[i]=col;}
}
Ifile=new PXMfile(fn,(cgrad<1?PXM_P5:PXM_P6)); // P5 = grayscale, P6 = RGB
Ifile->setsize(nx,ny);
if(cgrad<1) Ifile->writefile(gray,nx*ny);
else Ifile->writefile(rgb,nx*ny);
delete Ifile;
delete[] rgb;
delete colors;
};
// ******************************************************
// Demo driver: for 100 time steps, evaluate G_function on an N x N grid,
// dump amplitude^2/phase images, then form the x-derivative in Fourier
// space (forward FFT -> multiply by i*k_x -> inverse FFT) and dump the
// re/im images. Optional argv[1] overrides the grid size N (default 256).
int main(int argc,char *argv[])
{
int N,i,n,dim;
int threads,blocks;
REAL t,dt,dkx,L,mval,Mval,x;
size_t fmem,tmem;
COMPLEX *GF,*Gtmp; /* removed unused host buffer f (allocated, never used) */
REAL *amp2,*phase,*Gamp2,*Gphase;
hipfftHandle fftPlan;
//welcome info
printf("template program using ");
#ifdef DOUBLE
printf("double");
#else
printf("single");
#endif
printf(" precision arithmetics.\n");
//default parameters
//assume square
dim=2;
N=256;
L=256.0;
// check if arguments are present and read them
if(argc > 1 ) N = atoi(argv[1]);
//excute
hipSetDevice(0);
hipMemGetInfo(&fmem,&tmem);
printf("GPU memory before allocation free: %lu, total: %lu\n",fmem,tmem);
threads=MAXT;
blocks=N*N/threads+(N*N%threads==0?0:1); // ceil(N*N / threads)
hipMalloc(&GF,N*N*sizeof(COMPLEX));
hipMalloc(&Gtmp,N*N*sizeof(COMPLEX));
hipMalloc(&Gamp2,N*N*sizeof(REAL));
hipMalloc(&Gphase,N*N*sizeof(REAL));
amp2=new REAL[N*N];
phase=new REAL[N*N];
//for FFT
dkx=TWOPI/L;
//include the 1/N^2 inverse-FFT normalization in dkx:
dkx=dkx/(1.0*N*N);
//we need a "plan"
#ifdef DOUBLE
if(dim==1) hipfftPlan1d(&fftPlan, N, HIPFFT_Z2Z,1);
else if(dim==2) hipfftPlan2d(&fftPlan, N, N, HIPFFT_Z2Z) ;
else if(dim==3) hipfftPlan3d(&fftPlan, N, N, N, HIPFFT_Z2Z);
#else
if(dim==1) hipfftPlan1d(&fftPlan, N, HIPFFT_C2C,1);
else if(dim==2) hipfftPlan2d(&fftPlan, N, N, HIPFFT_C2C) ;
else if(dim==3) hipfftPlan3d(&fftPlan, N, N, N, HIPFFT_C2C);
#endif
cerr("FFT plan"); //check for error
t=0.0;dt=0.1;
for(n=0;n<100;n++) {
hipLaunchKernelGGL(( G_function), dim3(blocks),dim3(threads), 0, 0, N,N,GF,t);
//output: amplitude^2 and phase images of the raw function
hipLaunchKernelGGL(( G_ampphase), dim3(blocks),dim3(threads), 0, 0, N*N,GF,Gamp2,Gphase);
hipMemcpy(amp2,Gamp2,N*N*sizeof(REAL),hipMemcpyDeviceToHost);
hipMemcpy(phase,Gphase,N*N*sizeof(REAL),hipMemcpyDeviceToHost);
writeBM_real("test_amp2_"+IntToStrF(n,4),amp2,0,2,N,N,5);
writeBM_real("test_phase_"+IntToStrF(n,4),phase,-PI,PI,N,N,6);
//FFT: x-derivative via Fourier space
G_FFT(GF,Gtmp,fftPlan); //forward
hipLaunchKernelGGL(( G_kernel), dim3(blocks),dim3(threads), 0, 0, N,Gtmp,dkx);
G_FFT(Gtmp,GF,fftPlan,false); //inverse
//output: re/im of the derivative, normalized to the observed min/max
hipLaunchKernelGGL(( G_splitreim), dim3(blocks),dim3(threads), 0, 0, N*N,GF,Gamp2,Gphase);
hipMemcpy(amp2,Gamp2,N*N*sizeof(REAL),hipMemcpyDeviceToHost);
hipMemcpy(phase,Gphase,N*N*sizeof(REAL),hipMemcpyDeviceToHost);
mval=Mval=amp2[0];
for(i=1;i<N*N;i++) {x=amp2[i];if(x>Mval) Mval=x;else if(x<mval) mval=x;}
printf("%f %f; ",mval,Mval);
writeBM_real("test_dx_re_"+IntToStrF(n,4),amp2,mval,Mval,N,N,5);
mval=Mval=phase[0];
for(i=1;i<N*N;i++) {x=phase[i];if(x>Mval) Mval=x;else if(x<mval) mval=x;}
printf("%f %f\n",mval,Mval);
writeBM_real("test_dx_im_"+IntToStrF(n,4),phase,mval,Mval,N,N,5);
t+=dt;
}
hipfftDestroy(fftPlan); /* plan was previously leaked */
delete[] amp2;
delete[] phase;
hipFree(GF);
hipFree(Gtmp);
hipFree(Gamp2);
hipFree(Gphase);
return 0;
}
// ******************************************************
| e37ebc98ddea1479605e33ade52fcd19e313e82a.cu | /*
Use "REAL" as floating point type instead of double or float
Compile with optional flag:
-DDOUBLE to use double instead of float
requires also -arch sm_21
single precision code:
> nvcc template.cu fileutils.cpp stringutils.cpp graphicstools.cpp -lcufft -o demo_single
double precision code:
> nvcc template.cu fileutils.cpp stringutils.cpp graphicstools.cpp -lcufft -DDOUBLE -arch sm_21 -o demo_single
*/
#include <stdio.h>
#include <stdlib.h> /* for rand() */
#include <unistd.h> /* for getpid() */
#include <time.h> /* for time() */
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cufft.h>
#include "fileutils.h"
#include "stringutils.h"
#include "graphicstools.h"
// ******************************************************
#define PI 3.1415926535897932384626433832795
#define TWOPI 6.28318530717958647692528676655901
// construct REAL "type," depending on desired precision
// set the maximum number of threads
#ifdef DOUBLE
#define REAL double
#define MAXT 256
#else
#define REAL float
#define MAXT 512
#endif
// Complex number in the selected REAL precision (float or double),
// cast to the matching cufft complex type in G_FFT.
typedef struct {
REAL re;
REAL im;
} COMPLEX;
// ******************************************************
//calculate the k-index in order to determine the correct k-vector for a given x,y,z-index
#define k_INDEX(i,L) ((i)<=((L)/2)?(i):((i)-(L)))
// ******************************************************
//initialize a real GPU array with a constant
// N: element count; index formula supports a 2D grid of 1D blocks.
__global__ void G_setrealconst(int N,REAL *a,REAL val) {
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(idx<N) a[idx]=val; // guard: grid may over-cover N
};
//multiply two complex GPU arrays (A,B) and store result in A
// Elementwise complex product A[i] *= B[i]; B is left untouched.
__global__ void G_mulcarray(int N,COMPLEX *A,COMPLEX *B)
{
int i=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
REAL re,im,re2,im2;
if(i<N)
{
re=A[i].re;im=A[i].im;
re2=B[i].re;im2=B[i].im;
A[i].re=re*re2-im*im2; // real part of (re+i*im)*(re2+i*im2)
A[i].im=im*re2+re*im2; // imaginary part
}
};
// ******************************************************
//execute the FFT on the GPU, zin and zout can be the same array for "in-place" FFT (a little slower)
//set "fwd" to false for inverse FFT
// Dispatches to the Z2Z (double) or C2C (float) variant to match REAL.
// NOTE(review): the cufft status is not checked here; callers rely on
// calling cerr() afterwards.
void G_FFT(COMPLEX *zin,COMPLEX *zout,cufftHandle &fftPlan,bool fwd=true)
{
#ifdef DOUBLE
if(fwd) cufftExecZ2Z(fftPlan,(cufftDoubleComplex*) zin,(cufftDoubleComplex*) zout,CUFFT_FORWARD);
else cufftExecZ2Z(fftPlan,(cufftDoubleComplex*) zin,(cufftDoubleComplex*) zout,CUFFT_INVERSE);
#else
if(fwd) cufftExecC2C(fftPlan,(cufftComplex*) zin,(cufftComplex*) zout,CUFFT_FORWARD);
else cufftExecC2C(fftPlan,(cufftComplex*) zin,(cufftComplex*) zout,CUFFT_INVERSE);
#endif
};
// ******************************************************
//split a complex array in two real arrays containing amplitude^2 and phase
// amp2[i] = |A[i]|^2 (squared magnitude, no sqrt); phase via atan2(im,re).
__global__ void G_ampphase(int N,COMPLEX *A,REAL* amp2,REAL* phase)
{
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
REAL re,im;
if(idx<N)
{
re=A[idx].re;im=A[idx].im;
amp2[idx]=re*re+im*im;
phase[idx]=atan2(im,re);
}
};
//split a complex array in two real arrays containing real and imaginary parts
// Copies A[i].re -> re[i] and A[i].im -> im[i]; A is left untouched.
__global__ void G_splitreim(int N,COMPLEX *A,REAL* re,REAL* im)
{
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(idx<N)
{
im[idx]=A[idx].im;
re[idx]=A[idx].re;
}
};
// ******************************************************
//check for a CUDA error, use argument for identification
// Returns true (and prints a message tagged with s) if a CUDA error is
// pending; false otherwise.
bool cerr(const char *s="n/a")
{
cudaError_t err=cudaGetLastError(); // note: this also clears the sticky error
if(err==cudaSuccess)
return false;
printf("CUDA error [%s]: %s\n",s,cudaGetErrorString(err));
return true;
};
//some function initializing a 2D complex array
// Fills the Nx x Ny array f (row-major, i = column, j = row) with a
// time-dependent separable sine/cosine test pattern.
__global__ void G_function(int Nx,int Ny, COMPLEX *f,REAL t) {
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
int i,j;
if(idx<Nx*Ny) {
i=idx%Nx;
j=idx/Nx;
f[idx].re=sin(0.1*i+t)*cos(0.1*t*j);
f[idx].im=-sin(0.1*j+t)*cos(0.1*t*i);
}
};
//function to calculate the x-derivate in Fourier space
// Multiplies f(k_x,k_y) by i*k_x in place; assumes a square Nx x Nx grid.
// dkx as passed by main() already includes the 1/(Nx*Nx) inverse-FFT
// normalization factor.
__global__ void G_kernel(int Nx, COMPLEX *f,REAL dkx) {
int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
int i,j,ki;
REAL x,y,k;
if(idx<Nx*Nx) {
i=idx%Nx;
j=idx/Nx;
//calculate the x-derivative in Fourier space
ki=k_INDEX(i,Nx); //the Fourier component
//kj=k_INDEX(j,Ny);
//multipy i*k_x to f(k_x,k_y)
k=dkx*ki;
// (re + i*im) * (i*k) = -k*im + i*(k*re)
y=k*f[idx].re;
x=-k*f[idx].im;
f[idx].re=x;
f[idx].im=y;
}
}
// ******************************************************
//outputs an NetPBM image based on a real array a, m is the minimum value of a and M the maximum, nx&ny the dimension of a and cgrad a color gradient
/* cgrad values
0: rainbow
1: rainbow 2
2: rainbow 3
3: dark rainbow
4: temperature
5: temperature 2
6: thermo
7: solar
8: sunset
9: neon
*/
// a: nx*ny values; [m,M] is mapped to [0,1] for the color lookup.
void writeBM_real(string fn,REAL *a,REAL m,REAL M,int nx,int ny,int cgrad)
{
int i,n;
dcolor dcol;
unsigned int col;
REAL val,dci=1.0/(M-m); // NOTE(review): M == m would divide by zero
unsigned int *rgb;
unsigned char *gray;
PXMfile *Ifile;
colorfunction *colors;
n=nx*ny;
colors= new colorfunction();
colors->selectgradient(cgrad);
rgb=new unsigned int[n];
gray=(unsigned char *) rgb; // gray view aliases the rgb buffer (only one is used)
for(i=0;i<n;i++)
{
val=(a[i]-m)*dci; // normalize a[i] to [0,1]
if(cgrad<1) {
col=(unsigned int) (256*val);if(col>255) col=255; // clamp to 8-bit gray
gray[i]=col;}
else {
dcol=colors->getgradientcolor(val);col=colors->get32bitcolor(dcol);
rgb[i]=col;}
}
Ifile=new PXMfile(fn,(cgrad<1?PXM_P5:PXM_P6)); // P5 = grayscale, P6 = RGB
Ifile->setsize(nx,ny);
if(cgrad<1) Ifile->writefile(gray,nx*ny);
else Ifile->writefile(rgb,nx*ny);
delete Ifile;
delete[] rgb;
delete colors;
};
// ******************************************************
// Demo driver: for 100 time steps, evaluate G_function on an N x N grid,
// dump amplitude^2/phase images, then form the x-derivative in Fourier
// space (forward FFT -> multiply by i*k_x -> inverse FFT) and dump the
// re/im images. Optional argv[1] overrides the grid size N (default 256).
int main(int argc,char *argv[])
{
int N,i,n,dim;
int threads,blocks;
REAL t,dt,dkx,L,mval,Mval,x;
size_t fmem,tmem;
COMPLEX *GF,*Gtmp; /* removed unused host buffer f (allocated, never used) */
REAL *amp2,*phase,*Gamp2,*Gphase;
cufftHandle fftPlan;
//welcome info
printf("template program using ");
#ifdef DOUBLE
printf("double");
#else
printf("single");
#endif
printf(" precision arithmetics.\n");
//default parameters
//assume square
dim=2;
N=256;
L=256.0;
// check if arguments are present and read them
if(argc > 1 ) N = atoi(argv[1]);
//excute
cudaSetDevice(0);
cudaMemGetInfo(&fmem,&tmem);
printf("GPU memory before allocation free: %lu, total: %lu\n",fmem,tmem);
threads=MAXT;
blocks=N*N/threads+(N*N%threads==0?0:1); // ceil(N*N / threads)
cudaMalloc(&GF,N*N*sizeof(COMPLEX));
cudaMalloc(&Gtmp,N*N*sizeof(COMPLEX));
cudaMalloc(&Gamp2,N*N*sizeof(REAL));
cudaMalloc(&Gphase,N*N*sizeof(REAL));
amp2=new REAL[N*N];
phase=new REAL[N*N];
//for FFT
dkx=TWOPI/L;
//include the 1/N^2 inverse-FFT normalization in dkx:
dkx=dkx/(1.0*N*N);
//we need a "plan"
#ifdef DOUBLE
if(dim==1) cufftPlan1d(&fftPlan, N, CUFFT_Z2Z,1);
else if(dim==2) cufftPlan2d(&fftPlan, N, N, CUFFT_Z2Z) ;
else if(dim==3) cufftPlan3d(&fftPlan, N, N, N, CUFFT_Z2Z);
#else
if(dim==1) cufftPlan1d(&fftPlan, N, CUFFT_C2C,1);
else if(dim==2) cufftPlan2d(&fftPlan, N, N, CUFFT_C2C) ;
else if(dim==3) cufftPlan3d(&fftPlan, N, N, N, CUFFT_C2C);
#endif
cerr("FFT plan"); //check for error
t=0.0;dt=0.1;
for(n=0;n<100;n++) {
G_function<<<blocks,threads>>>(N,N,GF,t);
//output: amplitude^2 and phase images of the raw function
G_ampphase<<<blocks,threads>>>(N*N,GF,Gamp2,Gphase);
cudaMemcpy(amp2,Gamp2,N*N*sizeof(REAL),cudaMemcpyDeviceToHost);
cudaMemcpy(phase,Gphase,N*N*sizeof(REAL),cudaMemcpyDeviceToHost);
writeBM_real("test_amp2_"+IntToStrF(n,4),amp2,0,2,N,N,5);
writeBM_real("test_phase_"+IntToStrF(n,4),phase,-PI,PI,N,N,6);
//FFT: x-derivative via Fourier space
G_FFT(GF,Gtmp,fftPlan); //forward
G_kernel<<<blocks,threads>>>(N,Gtmp,dkx);
G_FFT(Gtmp,GF,fftPlan,false); //inverse
//output: re/im of the derivative, normalized to the observed min/max
G_splitreim<<<blocks,threads>>>(N*N,GF,Gamp2,Gphase);
cudaMemcpy(amp2,Gamp2,N*N*sizeof(REAL),cudaMemcpyDeviceToHost);
cudaMemcpy(phase,Gphase,N*N*sizeof(REAL),cudaMemcpyDeviceToHost);
mval=Mval=amp2[0];
for(i=1;i<N*N;i++) {x=amp2[i];if(x>Mval) Mval=x;else if(x<mval) mval=x;}
printf("%f %f; ",mval,Mval);
writeBM_real("test_dx_re_"+IntToStrF(n,4),amp2,mval,Mval,N,N,5);
mval=Mval=phase[0];
for(i=1;i<N*N;i++) {x=phase[i];if(x>Mval) Mval=x;else if(x<mval) mval=x;}
printf("%f %f\n",mval,Mval);
writeBM_real("test_dx_im_"+IntToStrF(n,4),phase,mval,Mval,N,N,5);
t+=dt;
}
cufftDestroy(fftPlan); /* plan was previously leaked */
delete[] amp2;
delete[] phase;
cudaFree(GF);
cudaFree(Gtmp);
cudaFree(Gamp2);
cudaFree(Gphase);
return 0;
}
// ******************************************************
|
d8bcce7d771ff317fb6665d746094df178bf136e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "get_hausdorff_dis_gpu.h"
#include "cuda_utils.h"
#define gt_num 42
#define voxel_dim 31
#define dict_grid_num (voxel_dim*voxel_dim*voxel_dim)
#define prior_point_num 10
// Per-(keypoint, prior-shape) normalized Hausdorff distance feature.
//
// neighbor_points: B x M x nsample x 3, coordinates around each keypoint
//   (assumed to lie within [-radius, radius]^3 -- TODO confirm with caller).
// prior_points:    Nshapes x prior_point_num x 3 template point sets.
// dis_dicts:       Nshapes x voxel_dim^3 lookup table; presumably the
//   precomputed distance from each voxel to the prior shape -- verify.
// features (out):  B x M x gt_num; Hausdorff distance divided by radius,
//   capped at 1.
// whole_points, keypoints and stream are currently unused by this kernel.
//
// Launch contract (see launcher): blocks(keypoint_num/16, batch_size),
// threads(gt_num, 16): threadIdx.x selects the prior shape, threadIdx.y
// the keypoint within the block.
__global__ void get_hausdorff_dis_kernel_fast(const float *__restrict__ whole_points,
const float *__restrict__ keypoints,
const float *__restrict__ neighbor_points,
float *__restrict__ features, float radius,
int batch_size, int whole_point_num,
int keypoint_num, int neighbor_point_num,
const float* __restrict__ prior_points,
const float* __restrict__ dis_dicts,
float voxel_len, hipStream_t stream){
int batch_idx = blockIdx.y;
int point_idx = blockIdx.x * 16 + threadIdx.y; // 16 == blockDim.y in the launcher
int gt_idx = threadIdx.x;
// Defensive bounds guard: a no-op with the current exact-fit launch, but
// keeps the kernel safe if the grid ever over-covers keypoint_num.
if (batch_idx >= batch_size || point_idx >= keypoint_num) return;
// Debug tracing (the original 'pirntf' calls did not compile).
printf("blockDim.x %d\n", blockDim.x);
printf("blockDim.y %d\n", blockDim.y);
printf("gridDim.x %d\n", gridDim.x);
printf("gridDim.y %d\n", gridDim.y);
printf("threadIdx.x %d\n", threadIdx.x);
printf("threadIdx.y %d\n", threadIdx.y);
printf("blockIdx.x %d\n", blockIdx.x);
printf("blockIdx.y %d\n", blockIdx.y);
printf("point_idx %d\n", point_idx);
printf("gt_idx %d\n", gt_idx);
// Advance the base pointers to this thread's slice.
neighbor_points += batch_idx * keypoint_num * neighbor_point_num * 3 + point_idx * neighbor_point_num * 3;
features += batch_idx * keypoint_num * gt_num + point_idx * gt_num + gt_idx;
dis_dicts += gt_idx * dict_grid_num;
prior_points += gt_idx * prior_point_num * 3;
// Directed distance neighborhood -> prior: voxelize each neighbor point
// and keep the maximum table lookup.
float to_prior_dis = 0;
float tmp_dis;
int xth, yth, zth;
int i;
int prior_hash_idx;
for( i = 0; i < neighbor_point_num; i++ ){
xth = floor(abs(neighbor_points[i*3 + 0] + radius) / voxel_len);
yth = floor(abs(neighbor_points[i*3 + 1] + radius) / voxel_len);
zth = floor(abs(neighbor_points[i*3 + 2] + radius) / voxel_len);
prior_hash_idx = xth + yth * voxel_dim + zth * voxel_dim * voxel_dim;
tmp_dis = dis_dicts[prior_hash_idx];
if( to_prior_dis < tmp_dis ){
to_prior_dis = tmp_dis;
}
}
// Directed distance prior -> neighborhood: max over prior points of the
// min squared distance to any neighbor; one sqrt at the end.
float prior_to_dis = 0;
float min_point_pair_dis;
int j;
for( i = 0; i < prior_point_num; i++ ){
min_point_pair_dis = 99.9; // sentinel; NOTE(review): pow() here is double math
for( j = 0; j < neighbor_point_num; j++ ){
tmp_dis = ( pow(prior_points[i*3 + 0] - neighbor_points[j*3 + 0], 2) +
pow(prior_points[i*3 + 1] - neighbor_points[j*3 + 1], 2) +
pow(prior_points[i*3 + 2] - neighbor_points[j*3 + 2], 2) );
if( min_point_pair_dis > tmp_dis ){
min_point_pair_dis = tmp_dis;
}
}
if( min_point_pair_dis > prior_to_dis ){
prior_to_dis = min_point_pair_dis;
}
}
prior_to_dis = sqrt(prior_to_dis);
// Symmetric Hausdorff distance, normalized by radius and capped at 1.
float hsdf_dis = prior_to_dis > to_prior_dis? prior_to_dis : to_prior_dis;
*features = hsdf_dis > radius? 1 : hsdf_dis / radius;
}
// Host launcher for get_hausdorff_dis_kernel_fast.
// NOTE(review): blocks.x = keypoint_num/16 truncates -- the last
// keypoint_num % 16 keypoints are never processed unless keypoint_num is a
// multiple of 16. Consider DIVUP(keypoint_num, 16) together with a bounds
// guard inside the kernel.
void get_hausdorff_dis_kernel_launcher_fast(const float* whole_points, const float* keypoints,
const float* neighbor_points,
float* features, float radius,
int batch_size, int whole_point_num, int keypoint_num,
int neighbor_point_num,
const float* prior_points, const float* dis_dicts,
float voxel_len, hipStream_t stream){
// whole_points: B N C
// keypoints: B N C
// neighbor_points: B N nsample C
// prior_points: Nshapes Npoints_per_shape Cxyz
// dis_dicts: Nshapes N_hash_grid_per_shape Cxyz
// output:
// features: batch_size point_num Nshapes
hipError_t err;
// dim3 blocks(DIVUP(point_num, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
// dim3 threads(THREADS_PER_BLOCK);
// ball_query_kernel_fast<<<blocks, threads, 0, stream>>>(b, n, m, radius, nsample, new_xyz, xyz, idx);
// dim3 blocks(DIVUP(keypoint_num*gt_num, THREADS_PER_BLOCK), batch_size);
// dim3 threads(gt_num, DIVUP(THREADS_PER_BLOCK, gt_num));
dim3 blocks(keypoint_num/16, batch_size);
dim3 threads(gt_num, 16); // threadIdx.x = shape, threadIdx.y = point in block
printf("get_hausdorff_dis_kernel_fast\n");
hipLaunchKernelGGL(( get_hausdorff_dis_kernel_fast), dim3(blocks), dim3(threads), 0, stream,
whole_points, keypoints, neighbor_points, features, radius, batch_size, whole_point_num,
keypoint_num, neighbor_point_num, prior_points, dis_dicts, voxel_len, stream);
printf("END get_hausdorff_dis_kernel_fast\n");
// NOTE(review): this device-wide sync (kept for in-kernel printf) defeats
// the stream-based asynchrony implied by the stream parameter.
hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| d8bcce7d771ff317fb6665d746094df178bf136e.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "get_hausdorff_dis_gpu.h"
#include "cuda_utils.h"
#define gt_num 42
#define voxel_dim 31
#define dict_grid_num (voxel_dim*voxel_dim*voxel_dim)
#define prior_point_num 10
// Per-(keypoint, prior-shape) normalized Hausdorff distance feature.
//
// neighbor_points: B x M x nsample x 3, coordinates around each keypoint
//   (assumed to lie within [-radius, radius]^3 -- TODO confirm with caller).
// prior_points:    Nshapes x prior_point_num x 3 template point sets.
// dis_dicts:       Nshapes x voxel_dim^3 lookup table; presumably the
//   precomputed distance from each voxel to the prior shape -- verify.
// features (out):  B x M x gt_num; Hausdorff distance divided by radius,
//   capped at 1.
// whole_points, keypoints and stream are currently unused by this kernel.
//
// Launch contract (see launcher): blocks(keypoint_num/16, batch_size),
// threads(gt_num, 16): threadIdx.x selects the prior shape, threadIdx.y
// the keypoint within the block.
__global__ void get_hausdorff_dis_kernel_fast(const float *__restrict__ whole_points,
const float *__restrict__ keypoints,
const float *__restrict__ neighbor_points,
float *__restrict__ features, float radius,
int batch_size, int whole_point_num,
int keypoint_num, int neighbor_point_num,
const float* __restrict__ prior_points,
const float* __restrict__ dis_dicts,
float voxel_len, cudaStream_t stream){
int batch_idx = blockIdx.y;
int point_idx = blockIdx.x * 16 + threadIdx.y; // 16 == blockDim.y in the launcher
int gt_idx = threadIdx.x;
// Defensive bounds guard: a no-op with the current exact-fit launch, but
// keeps the kernel safe if the grid ever over-covers keypoint_num.
if (batch_idx >= batch_size || point_idx >= keypoint_num) return;
// Debug tracing (the original 'pirntf' calls did not compile).
printf("blockDim.x %d\n", blockDim.x);
printf("blockDim.y %d\n", blockDim.y);
printf("gridDim.x %d\n", gridDim.x);
printf("gridDim.y %d\n", gridDim.y);
printf("threadIdx.x %d\n", threadIdx.x);
printf("threadIdx.y %d\n", threadIdx.y);
printf("blockIdx.x %d\n", blockIdx.x);
printf("blockIdx.y %d\n", blockIdx.y);
printf("point_idx %d\n", point_idx);
printf("gt_idx %d\n", gt_idx);
// Advance the base pointers to this thread's slice.
neighbor_points += batch_idx * keypoint_num * neighbor_point_num * 3 + point_idx * neighbor_point_num * 3;
features += batch_idx * keypoint_num * gt_num + point_idx * gt_num + gt_idx;
dis_dicts += gt_idx * dict_grid_num;
prior_points += gt_idx * prior_point_num * 3;
// Directed distance neighborhood -> prior: voxelize each neighbor point
// and keep the maximum table lookup.
float to_prior_dis = 0;
float tmp_dis;
int xth, yth, zth;
int i;
int prior_hash_idx;
for( i = 0; i < neighbor_point_num; i++ ){
xth = floor(abs(neighbor_points[i*3 + 0] + radius) / voxel_len);
yth = floor(abs(neighbor_points[i*3 + 1] + radius) / voxel_len);
zth = floor(abs(neighbor_points[i*3 + 2] + radius) / voxel_len);
prior_hash_idx = xth + yth * voxel_dim + zth * voxel_dim * voxel_dim;
tmp_dis = dis_dicts[prior_hash_idx];
if( to_prior_dis < tmp_dis ){
to_prior_dis = tmp_dis;
}
}
// Directed distance prior -> neighborhood: max over prior points of the
// min squared distance to any neighbor; one sqrt at the end.
float prior_to_dis = 0;
float min_point_pair_dis;
int j;
for( i = 0; i < prior_point_num; i++ ){
min_point_pair_dis = 99.9; // sentinel; NOTE(review): pow() here is double math
for( j = 0; j < neighbor_point_num; j++ ){
tmp_dis = ( pow(prior_points[i*3 + 0] - neighbor_points[j*3 + 0], 2) +
pow(prior_points[i*3 + 1] - neighbor_points[j*3 + 1], 2) +
pow(prior_points[i*3 + 2] - neighbor_points[j*3 + 2], 2) );
if( min_point_pair_dis > tmp_dis ){
min_point_pair_dis = tmp_dis;
}
}
if( min_point_pair_dis > prior_to_dis ){
prior_to_dis = min_point_pair_dis;
}
}
prior_to_dis = sqrt(prior_to_dis);
// Symmetric Hausdorff distance, normalized by radius and capped at 1.
float hsdf_dis = prior_to_dis > to_prior_dis? prior_to_dis : to_prior_dis;
*features = hsdf_dis > radius? 1 : hsdf_dis / radius;
}
void get_hausdorff_dis_kernel_launcher_fast(const float* whole_points, const float* keypoints,
const float* neighbor_points,
float* features, float radius,
int batch_size, int whole_point_num, int keypoint_num,
int neighbor_point_num,
const float* prior_points, const float* dis_dicts,
float voxel_len, cudaStream_t stream){
// whole_points: B N C
// keypoints: B N C
// neighbor_points: B N nsample C
// prior_points: Nshapes Npoints_per_shape Cxyz
// dis_dicts: Nshapes N_hash_grid_per_shape Cxyz
// output:
// features: batch_size point_num Nshapes
cudaError_t err;
// dim3 blocks(DIVUP(point_num, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
// dim3 threads(THREADS_PER_BLOCK);
// ball_query_kernel_fast<<<blocks, threads, 0, stream>>>(b, n, m, radius, nsample, new_xyz, xyz, idx);
// dim3 blocks(DIVUP(keypoint_num*gt_num, THREADS_PER_BLOCK), batch_size);
// dim3 threads(gt_num, DIVUP(THREADS_PER_BLOCK, gt_num));
dim3 blocks(keypoint_num/16, batch_size);
dim3 threads(gt_num, 16);
printf("get_hausdorff_dis_kernel_fast\n");
get_hausdorff_dis_kernel_fast<<<blocks, threads, 0, stream>>>(
whole_points, keypoints, neighbor_points, features, radius, batch_size, whole_point_num,
keypoint_num, neighbor_point_num, prior_points, dis_dicts, voxel_len, stream);
printf("END get_hausdorff_dis_kernel_fast\n");
cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
3c4100a0f869c441cde1ef4bbe4472a59207ef22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex stalanxva(hipComplex z, float K)
{
/* packed standard map */
hipComplex out(0.0,0.0);
out.r = floor((z.r + K * sinf(z.i))/6.28318530717958)*6.28318530717958;
out.i = floor((z.i + out.r)/6.28318530717958 )*6.28318530717958;
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale = 20;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
for(v=0;v<20;v++)
{
cue = stalanxva(cue,moux.r);
/*accume = accume + urigo(powc(cue * aon - conj(cue*uon),aon),uon*fixon);*/
}
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | 3c4100a0f869c441cde1ef4bbe4472a59207ef22.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
/* One-dimensional truncated theta series:
 *   sum_{n=-10}^{9} exp(pi*i*tau*n^2) * exp(2*i*z*n)
 * working directly in tau rather than the nome q (see original note).
 * Unused scratch constants (oo, nini, eigh) were removed; behaviour is
 * otherwise unchanged. */
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* set accumulant to zero */
cuComplex A(0.0,0.0);
cuComplex pai(3.14159265353898,0.0); /* pi */
cuComplex ai(0.0,1.0);               /* imaginary unit */
cuComplex oot(2.0,0.0);
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ; /* pi*i*tau: exponent scale for n^2 */
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;    /* 2*i*z: exponent scale for n */
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn; /* n^2 carried as a cuComplex */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
/* Truncated Mittag-Leffler-style series:
 *   sum_{v=0}^{19} z^v / Gamma(c.r*v + c.i)   (Gamma argument is real). */
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
	cuComplex acc(0.0,0.0);   /* running sum */
	cuComplex zpow(1.0,0.0);  /* z^v, updated incrementally */
	cuComplex gam(0.0,0.0);   /* Gamma value, kept in the real part */
	for(int v = 0; v < 20; ++v)
	{
		gam.r = tgammaf(c.r*v + c.i);
		acc = acc + zpow/gam;
		zpow = zpow * z;
	}
	return acc;
}
/* Component-wise Bessel wrapper: J0 of the real part, J1 of the imaginary part. */
__device__ cuComplex helva(cuComplex z)
{
	return cuComplex(j0f(z.r), j1f(z.i));
}
/* Component-wise Bessel wrapper: J1 of the real part, J0 of the imaginary part. */
__device__ cuComplex hilva(cuComplex z)
{
	return cuComplex(j1f(z.r), j0f(z.i));
}
/* Component-wise Bessel wrapper: J1 applied to both components. */
__device__ cuComplex hinva(cuComplex z)
{
	return cuComplex(j1f(z.r), j1f(z.i));
}
/* Component-wise inverse-hyperbolic wrapper: acosh of real, asinh of imaginary. */
__device__ cuComplex henga(cuComplex z)
{
	return cuComplex(acoshf(z.r), asinhf(z.i));
}
/* Component-wise Bessel wrapper: Y0 of the real part, Y1 of the imaginary part. */
__device__ cuComplex holva(cuComplex z)
{
	return cuComplex(y0f(z.r), y1f(z.i));
}
/* Truncated triple-product-style expansion (10 factors) whose cross term is
 * built from hinva(2*z). */
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
	cuComplex one(1.0,0.0);
	cuComplex two(2.0,0.0);
	cuComplex qpow(1.0,0.0);  /* q^(2k) after k iterations */
	cuComplex prod(1.0,0.0);
	for(int k = 0; k < 10; ++k)
	{
		qpow = qpow * q * q;
		prod = prod * (one - qpow) * (one + two * qpow/q * hinva(two*z) + qpow*qpow/(q*q));
	}
	return prod;
}
/* Truncated triple-product-style expansion (10 factors) whose cross term is
 * built from holva(2*z). */
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
	cuComplex one(1.0,0.0);
	cuComplex two(2.0,0.0);
	cuComplex qpow(1.0,0.0);  /* q^(2k) after k iterations */
	cuComplex prod(1.0,0.0);
	for(int k = 0; k < 10; ++k)
	{
		qpow = qpow * q * q;
		prod = prod * (one - qpow) * (one + two * qpow/q * holva(two*z) + qpow*qpow/(q*q));
	}
	return prod;
}
/* Truncated triple-product-style expansion (10 factors) whose cross term
 * raises hilva(q*z) to the power helva(q*z) via powc. */
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
	cuComplex one(1.0,0.0);
	cuComplex two(2.0,0.0);
	cuComplex qpow(1.0,0.0);  /* q^(2k) after k iterations */
	cuComplex prod(1.0,0.0);
	for(int k = 0; k < 10; ++k)
	{
		qpow = qpow * q * q;
		prod = prod * (one - qpow) * (one + two * qpow/q * powc(hilva(q*z),helva(q*z)) + qpow*qpow/(q*q));
	}
	return prod;
}
/* "Packed standard map": one Chirikov-style standard-map step whose
 * coordinates are then snapped down to whole multiples of 2*pi.
 * NOTE(review): floor(x/2pi)*2pi keeps only the full-period part and discards
 * the fractional remainder - confirm this quantisation is intentional and not
 * a missing "x - floor(...)*2pi" wrap-around. */
__device__ cuComplex stalanxva(cuComplex z, float K)
{
/* packed standard map */
cuComplex out(0.0,0.0);
out.r = floor((z.r + K * sinf(z.i))/6.28318530717958)*6.28318530717958;
out.i = floor((z.i + out.r)/6.28318530717958 )*6.28318530717958;
return out;
}
/* Saturate an int into the displayable byte range [0, 255]. */
__device__
unsigned char clip(int n) {
	if (n < 0) return 0;
	if (n > 255) return 255;
	return (unsigned char)n;
}
/* Per-pixel kernel: maps each pixel to a point in the complex plane, iterates
 * the "packed standard map" stalanxva 20 times with a mouse-controlled
 * parameter, and colours the pixel by the argument of the final point.
 * Expects a 2D launch covering a w x h image; DIM and `scale` fix the view
 * window.  Fixes vs. the original: the bounds guard (previously commented
 * out) is restored so overhanging threads no longer write out of bounds, and
 * the large set of locals never referenced by the active code was removed. */
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
	const int c = blockIdx.x*blockDim.x + threadIdx.x;
	const int r = blockIdx.y*blockDim.y + threadIdx.y;
	if ((c >= w) || (r >= h)) return; // guard: the grid may overhang the image
	const int i = c + r*w;            // 1D pixel index
	float pi = 3.1415926535898;
	const float scale = 20;
	/* pixel coordinates -> complex-plane coordinates */
	float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
	float fy = scale * (float)(DIM/2 - r)/(DIM/2);
	/* mouse x position -> map parameter (only the real part is used) */
	float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
	cuComplex moux(LA,0.0);
	cuComplex q(fx,fy);
	cuComplex cue = q;
	/* iterate the packed standard map */
	for(int v = 0; v < 20; v++)
	{
		cue = stalanxva(cue,moux.r);
	}
	/* colour by the normalised argument of the final point (three phases
	   120 degrees apart give an RGB hue wheel) */
	double tha;
	tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
	d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
	d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
	d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
	d_out[i].w = 255;
}
/* Host-side launcher: TX x TY thread blocks, grid sized by ceiling division
 * so every pixel of the w x h image is covered. */
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
	const int gridX = (w + TX - 1)/TX;
	const int gridY = (h + TY - 1)/TY;
	distanceKernel<<<dim3(gridX, gridY), dim3(TX, TY)>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
83c11941a6e38d79ebd8841dacb7f20d4343bb2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "bmp_util.c"
/* Riferimenti utili:
* https://developer.nvidia.com/blog/even-easier-introduction-cuda/
* https://developer.nvidia.com/blog/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
*/
/* Print a readable message and abort when a HIP API call fails. */
static void HandleError(hipError_t err, const char *file, int line){
	if (err == hipSuccess) {
		return;
	}
	printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
	exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define indexx(i, j, N) ((i)*(N)) + (j)
/* Atomic minimum for float, emulated with integer atomicCAS.
 * The unsynchronised pre-checks (*address <= value) are fast-path filters
 * only; correctness comes from the CAS retry loop, which re-reads the cell
 * until either the store succeeds or a smaller value is already present. */
__device__ void atomicMin(float* const address, const float value)
{
if (*address <= value)
{
return; /* fast path: the cell is already small enough */
}
int* const addressAsI = (int*)address;
int old = *addressAsI, assumed;
do
{
assumed = old;
if (__int_as_float(assumed) <= value)
{
break; /* another thread stored a smaller value meanwhile */
}
old = atomicCAS(addressAsI, assumed, __float_as_int(value));
} while (assumed != old);
}
/* Block-wise argmin over a size x sizeM matrix of scores.
 * Each thread scans a strided subset, the block combines the partial minima
 * through the float atomicMin above, and thread (0,0) publishes the block
 * result into minOut/minIdxOut/minIdyOut[blockIdx.x].
 * NOTE(review): sharedMinIdx/sharedMinIdy are written without atomics by
 * every thread whose localMin equals sharedMin, so ties are resolved
 * arbitrarily; localMin also starts from input[0] without abs(), which is
 * safe only while the input is nonnegative (SAD scores are) - confirm before
 * reusing this kernel elsewhere. */
__global__ void getMin(const float* __restrict__ input, const int size, const int sizeM, float* minOut, int* minIdxOut, int* minIdyOut)
{
__shared__ float sharedMin;
__shared__ int sharedMinIdx;
__shared__ int sharedMinIdy;
/* thread (0,0) seeds the block minimum (threadIdx.x is 0 here, so input[0]) */
if (0 == threadIdx.x && threadIdx.y == 0)
{
sharedMin = input[threadIdx.x];
sharedMinIdx = 0;
sharedMinIdy = 0;
}
__syncthreads();
float localMin = input[0];
int localMinIdx = 0;
int localMinIdy = 0;
/* strided scan of the matrix; each thread keeps its own best candidate */
for (int i = blockIdx.y * blockDim.y + threadIdx.y ; i < size; i += blockDim.y)
{
for (int j = threadIdx.x + blockIdx.x * blockDim.x; j < sizeM; j+=blockDim.x) {
float val = input[i * sizeM + j];
if (localMin > abs(val)){
localMin = abs(val);
localMinIdx = j;
localMinIdy = i;
}
}
}
// Atomic function for the minimum over float values
atomicMin(&sharedMin, localMin);
__syncthreads();
/* the winning thread records its coordinates (unsynchronised on ties) */
if (sharedMin == localMin)
{
sharedMinIdx = localMinIdx;
sharedMinIdy = localMinIdy;
}
__syncthreads();
if (0 == threadIdx.x && threadIdx.y == 0)
{
minOut[blockIdx.x] = sharedMin;
minIdxOut[blockIdx.x] = sharedMinIdx;
minIdyOut[blockIdx.x] = sharedMinIdy;
}
}
/* SAD template matching: for every valid placement (i,j) of the Tw x Th
 * template T inside the Iw x Ih image I, accumulates the sum of absolute
 * differences and stores it in differences[j + i*(Iw-Tw+1)].
 * The template is staged in dynamic shared memory (sized at launch; the host
 * verifies it fits in sharedMemPerBlock). */
__global__ void getMatch(float *I, float *T, int Iw, int Ih, int Tw, int Th, float *differences) {
float diff = 0;
float temp;
int k, l;
int i = 0;
int j = 0;
extern __shared__ float s_template[];
// Stage the template image in shared memory (the host guarantees the
// template is smaller than the available shared memory)
for (i = threadIdx.y; i <Th; i+=blockDim.y ) {
for (j = threadIdx.x; j <Tw; j+=blockDim.x) {
s_template[j+Tw*i] = T[j+Tw*i];
}
}
__syncthreads();
// grid-stride loop over all candidate placements
for (i = blockIdx.y * blockDim.y + threadIdx.y; i <= Ih - Th; i+=blockDim.y * gridDim.y ) {
for (j = blockIdx.x * blockDim.x + threadIdx.x; j <= Iw - Tw; j+=blockDim.x * gridDim.x ) {
// each thread computes one full SAD score
for (k = 0; k < Th; k++) {
for (l = 0; l < Tw; l++) {
temp = I[((l + j) + (k + i)*Iw)] - s_template[l + k*Tw]; // SAD
diff += fabsf(temp); // absolute value
}
}
// store the score and reset for the next placement
differences[j + i * (Iw - Tw + 1)] = diff;
diff = 0;
}
}
}
/* Template matching driver: loads image + template, computes SAD scores on
 * the GPU, finds the minimum, and writes a marked copy of the image.
 * Fixes vs. the original: host buffers are now freed, the unused host-side
 * `differences` buffer was removed, and kernel launches are error-checked. */
int main(int argc, char *argv[]){
	clock_t start = clock();
	// The first allocation pays for lazy context creation; an early hipFree(0)
	// forces initialisation up front so the timed allocations are fast.
	// https://forums.developer.nvidia.com/t/cudamalloc-slow/40238/3
	hipFree(0);
	if(argc != 4 ){
		printf("Numero di argomenti non validi!\n");
		printf("Aggiungere: immagine di origine, template e immagine di destinazione \n");
		exit(0);
	}
	int origine_width, origine_height, template_width, template_height, maxThreads, *index, *indexY, *d_index, *d_indexY;
	float *d_origine, *d_template, *d_differences, *output, *d_output;
	hipDeviceProp_t prop;
	// Host images (ReadBMP allocates; released at the end)
	float *origine = ReadBMP(argv[1], &origine_width, &origine_height);
	float *templat = ReadBMP(argv[2], &template_width, &template_height);
	// Query device properties
	HANDLE_ERROR( hipGetDeviceProperties(&prop, 0));
	maxThreads = prop.maxThreadsPerBlock;
	// The template must fit in shared memory (getMatch caches it there)
	if( prop.sharedMemPerBlock < sizeof(float) *template_width * template_height){
		printf("Dimensione del template troppo grande rispetto allo spazio in Shared Memory \n");
		exit(0);
	}
	// Size of the difference map: one SAD score per candidate placement
	int differenceW = (origine_width - template_width + 1);
	int differenceH = (origine_height - template_height + 1);
	int dimThreadsPerBlock = maxThreads/32; // multiple of 32, max 1024 (device dependent)
	dim3 threadsPerBlock(dimThreadsPerBlock,dimThreadsPerBlock);
	// Enough blocks to cover every cell of the difference map with one thread;
	// add maxThreads-1 before the integer division to round up.
	int blocks = (differenceW * differenceH + maxThreads - 1) / maxThreads;
	// Each block produces one candidate minimum
	size_t output_size = blocks;
	output = (float *) malloc(output_size*sizeof(float));
	index = (int *) malloc(output_size*sizeof(int));
	indexY = (int *) malloc(output_size*sizeof(int));
	// Device allocations: image, template, score map, result slots
	HANDLE_ERROR( hipMalloc((void**)&d_origine, sizeof(float)*origine_width * origine_height));
	HANDLE_ERROR( hipMalloc((void**)&d_template, sizeof(float) * template_width * template_height));
	HANDLE_ERROR( hipMalloc((void**)&d_differences, sizeof(float) * differenceW * differenceH));
	HANDLE_ERROR(hipMalloc( (void**)&d_index,output_size*sizeof(int)));
	HANDLE_ERROR(hipMalloc( (void**)&d_indexY,output_size*sizeof(int)));
	HANDLE_ERROR(hipMalloc( (void**)&d_output,output_size*sizeof(float)));
	// Upload the image and the template
	HANDLE_ERROR(hipMemcpy(d_origine, origine, sizeof(float)*origine_width * origine_height, hipMemcpyHostToDevice));
	HANDLE_ERROR(hipMemcpy(d_template, templat, sizeof(float)*template_width*template_height, hipMemcpyHostToDevice));
	// SAD score for every candidate placement
	hipLaunchKernelGGL(( getMatch), dim3(blocks),dim3(threadsPerBlock),sizeof(float) * template_width * template_height, 0, d_origine, d_template,
	origine_width, origine_height, template_width, template_height, d_differences);
	HANDLE_ERROR(hipGetLastError());
	HANDLE_ERROR(hipDeviceSynchronize());
	// Locate the minimum score (best match position)
	hipLaunchKernelGGL(( getMin), dim3(blocks),dim3(threadsPerBlock), 0, 0, d_differences, differenceH, differenceW, d_output, d_index, d_indexY);
	HANDLE_ERROR(hipGetLastError());
	HANDLE_ERROR(hipDeviceSynchronize());
	// Download the coordinates (and the minimum value, kept for testing)
	HANDLE_ERROR( hipMemcpy(index,d_index,output_size*sizeof(int),hipMemcpyDeviceToHost));
	HANDLE_ERROR( hipMemcpy(indexY,d_indexY,output_size*sizeof(int),hipMemcpyDeviceToHost));
	HANDLE_ERROR( hipMemcpy(output,d_output,output_size*sizeof(float),hipMemcpyDeviceToHost));
	// For testing:
	//printf("minimo: %f\n", output[0]);
	//printf("X: %i\n", index[0]);
	//printf("Y: %i\n", indexY[0]);
	hipFree(d_origine);
	hipFree(d_template);
	hipFree(d_differences);
	hipFree(d_output);
	hipFree(d_index);
	hipFree(d_indexY);
	// Draw the match rectangle and save the result image
	int x1, x2, y1, y2;
	x1 = index[0];
	x2 = index[0] + template_width - 1;
	y1 = indexY[0];
	y2 = indexY[0] + template_height - 1;
	MarkAndSave(argv[1], x1, y1, x2, y2, argv[3]);
	printf("Percorso immagine risultante: %s\n", argv[3]);
	// Release host memory (these were leaked before)
	free(origine);
	free(templat);
	free(output);
	free(index);
	free(indexY);
	clock_t end = clock();
	printf("Tempo di esecuzione = %f secondi \n", ((double)(end - start)) / CLOCKS_PER_SEC);
} | 83c11941a6e38d79ebd8841dacb7f20d4343bb2c.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "bmp_util.c"
/* Riferimenti utili:
* https://developer.nvidia.com/blog/even-easier-introduction-cuda/
* https://developer.nvidia.com/blog/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
*/
/* Print a readable message and abort when a CUDA API call fails. */
static void HandleError(cudaError_t err, const char *file, int line){
	if (err == cudaSuccess) {
		return;
	}
	printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
	exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define indexx(i, j, N) ((i)*(N)) + (j)
/* Atomic minimum for float, emulated with integer atomicCAS.
 * The unsynchronised pre-checks (*address <= value) are fast-path filters
 * only; correctness comes from the CAS retry loop, which re-reads the cell
 * until either the store succeeds or a smaller value is already present. */
__device__ void atomicMin(float* const address, const float value)
{
if (*address <= value)
{
return; /* fast path: the cell is already small enough */
}
int* const addressAsI = (int*)address;
int old = *addressAsI, assumed;
do
{
assumed = old;
if (__int_as_float(assumed) <= value)
{
break; /* another thread stored a smaller value meanwhile */
}
old = atomicCAS(addressAsI, assumed, __float_as_int(value));
} while (assumed != old);
}
/* Block-wise argmin over a size x sizeM matrix of scores.
 * Each thread scans a strided subset, the block combines the partial minima
 * through the float atomicMin above, and thread (0,0) publishes the block
 * result into minOut/minIdxOut/minIdyOut[blockIdx.x].
 * NOTE(review): sharedMinIdx/sharedMinIdy are written without atomics by
 * every thread whose localMin equals sharedMin, so ties are resolved
 * arbitrarily; localMin also starts from input[0] without abs(), which is
 * safe only while the input is nonnegative (SAD scores are) - confirm before
 * reusing this kernel elsewhere. */
__global__ void getMin(const float* __restrict__ input, const int size, const int sizeM, float* minOut, int* minIdxOut, int* minIdyOut)
{
__shared__ float sharedMin;
__shared__ int sharedMinIdx;
__shared__ int sharedMinIdy;
/* thread (0,0) seeds the block minimum (threadIdx.x is 0 here, so input[0]) */
if (0 == threadIdx.x && threadIdx.y == 0)
{
sharedMin = input[threadIdx.x];
sharedMinIdx = 0;
sharedMinIdy = 0;
}
__syncthreads();
float localMin = input[0];
int localMinIdx = 0;
int localMinIdy = 0;
/* strided scan of the matrix; each thread keeps its own best candidate */
for (int i = blockIdx.y * blockDim.y + threadIdx.y ; i < size; i += blockDim.y)
{
for (int j = threadIdx.x + blockIdx.x * blockDim.x; j < sizeM; j+=blockDim.x) {
float val = input[i * sizeM + j];
if (localMin > abs(val)){
localMin = abs(val);
localMinIdx = j;
localMinIdy = i;
}
}
}
// Atomic function for the minimum over float values
atomicMin(&sharedMin, localMin);
__syncthreads();
/* the winning thread records its coordinates (unsynchronised on ties) */
if (sharedMin == localMin)
{
sharedMinIdx = localMinIdx;
sharedMinIdy = localMinIdy;
}
__syncthreads();
if (0 == threadIdx.x && threadIdx.y == 0)
{
minOut[blockIdx.x] = sharedMin;
minIdxOut[blockIdx.x] = sharedMinIdx;
minIdyOut[blockIdx.x] = sharedMinIdy;
}
}
/* SAD template matching: for every valid placement (i,j) of the Tw x Th
 * template T inside the Iw x Ih image I, accumulates the sum of absolute
 * differences and stores it in differences[j + i*(Iw-Tw+1)].
 * The template is staged in dynamic shared memory (sized at launch; the host
 * verifies it fits in sharedMemPerBlock). */
__global__ void getMatch(float *I, float *T, int Iw, int Ih, int Tw, int Th, float *differences) {
float diff = 0;
float temp;
int k, l;
int i = 0;
int j = 0;
extern __shared__ float s_template[];
// Stage the template image in shared memory (the host guarantees the
// template is smaller than the available shared memory)
for (i = threadIdx.y; i <Th; i+=blockDim.y ) {
for (j = threadIdx.x; j <Tw; j+=blockDim.x) {
s_template[j+Tw*i] = T[j+Tw*i];
}
}
__syncthreads();
// grid-stride loop over all candidate placements
for (i = blockIdx.y * blockDim.y + threadIdx.y; i <= Ih - Th; i+=blockDim.y * gridDim.y ) {
for (j = blockIdx.x * blockDim.x + threadIdx.x; j <= Iw - Tw; j+=blockDim.x * gridDim.x ) {
// each thread computes one full SAD score
for (k = 0; k < Th; k++) {
for (l = 0; l < Tw; l++) {
temp = I[((l + j) + (k + i)*Iw)] - s_template[l + k*Tw]; // SAD
diff += fabsf(temp); // absolute value
}
}
// store the score and reset for the next placement
differences[j + i * (Iw - Tw + 1)] = diff;
diff = 0;
}
}
}
/* Template matching driver: loads image + template, computes SAD scores on
 * the GPU, finds the minimum, and writes a marked copy of the image.
 * Fixes vs. the original: host buffers are now freed, the unused host-side
 * `differences` buffer was removed, and kernel launches are error-checked. */
int main(int argc, char *argv[]){
	clock_t start = clock();
	// The first allocation pays for lazy context creation; an early cudaFree(0)
	// forces initialisation up front so the timed allocations are fast.
	// https://forums.developer.nvidia.com/t/cudamalloc-slow/40238/3
	cudaFree(0);
	if(argc != 4 ){
		printf("Numero di argomenti non validi!\n");
		printf("Aggiungere: immagine di origine, template e immagine di destinazione \n");
		exit(0);
	}
	int origine_width, origine_height, template_width, template_height, maxThreads, *index, *indexY, *d_index, *d_indexY;
	float *d_origine, *d_template, *d_differences, *output, *d_output;
	cudaDeviceProp prop;
	// Host images (ReadBMP allocates; released at the end)
	float *origine = ReadBMP(argv[1], &origine_width, &origine_height);
	float *templat = ReadBMP(argv[2], &template_width, &template_height);
	// Query device properties
	HANDLE_ERROR( cudaGetDeviceProperties(&prop, 0));
	maxThreads = prop.maxThreadsPerBlock;
	// The template must fit in shared memory (getMatch caches it there)
	if( prop.sharedMemPerBlock < sizeof(float) *template_width * template_height){
		printf("Dimensione del template troppo grande rispetto allo spazio in Shared Memory \n");
		exit(0);
	}
	// Size of the difference map: one SAD score per candidate placement
	int differenceW = (origine_width - template_width + 1);
	int differenceH = (origine_height - template_height + 1);
	int dimThreadsPerBlock = maxThreads/32; // multiple of 32, max 1024 (device dependent)
	dim3 threadsPerBlock(dimThreadsPerBlock,dimThreadsPerBlock);
	// Enough blocks to cover every cell of the difference map with one thread;
	// add maxThreads-1 before the integer division to round up.
	int blocks = (differenceW * differenceH + maxThreads - 1) / maxThreads;
	// Each block produces one candidate minimum
	size_t output_size = blocks;
	output = (float *) malloc(output_size*sizeof(float));
	index = (int *) malloc(output_size*sizeof(int));
	indexY = (int *) malloc(output_size*sizeof(int));
	// Device allocations: image, template, score map, result slots
	HANDLE_ERROR( cudaMalloc((void**)&d_origine, sizeof(float)*origine_width * origine_height));
	HANDLE_ERROR( cudaMalloc((void**)&d_template, sizeof(float) * template_width * template_height));
	HANDLE_ERROR( cudaMalloc((void**)&d_differences, sizeof(float) * differenceW * differenceH));
	HANDLE_ERROR(cudaMalloc( (void**)&d_index,output_size*sizeof(int)));
	HANDLE_ERROR(cudaMalloc( (void**)&d_indexY,output_size*sizeof(int)));
	HANDLE_ERROR(cudaMalloc( (void**)&d_output,output_size*sizeof(float)));
	// Upload the image and the template
	HANDLE_ERROR(cudaMemcpy(d_origine, origine, sizeof(float)*origine_width * origine_height, cudaMemcpyHostToDevice));
	HANDLE_ERROR(cudaMemcpy(d_template, templat, sizeof(float)*template_width*template_height, cudaMemcpyHostToDevice));
	// SAD score for every candidate placement
	getMatch<<<blocks,threadsPerBlock,sizeof(float) * template_width * template_height>>>(d_origine, d_template,
	origine_width, origine_height, template_width, template_height, d_differences);
	HANDLE_ERROR(cudaGetLastError());
	HANDLE_ERROR(cudaDeviceSynchronize());
	// Locate the minimum score (best match position)
	getMin<<<blocks,threadsPerBlock>>>(d_differences, differenceH, differenceW, d_output, d_index, d_indexY);
	HANDLE_ERROR(cudaGetLastError());
	HANDLE_ERROR(cudaDeviceSynchronize());
	// Download the coordinates (and the minimum value, kept for testing)
	HANDLE_ERROR( cudaMemcpy(index,d_index,output_size*sizeof(int),cudaMemcpyDeviceToHost));
	HANDLE_ERROR( cudaMemcpy(indexY,d_indexY,output_size*sizeof(int),cudaMemcpyDeviceToHost));
	HANDLE_ERROR( cudaMemcpy(output,d_output,output_size*sizeof(float),cudaMemcpyDeviceToHost));
	// For testing:
	//printf("minimo: %f\n", output[0]);
	//printf("X: %i\n", index[0]);
	//printf("Y: %i\n", indexY[0]);
	cudaFree(d_origine);
	cudaFree(d_template);
	cudaFree(d_differences);
	cudaFree(d_output);
	cudaFree(d_index);
	cudaFree(d_indexY);
	// Draw the match rectangle and save the result image
	int x1, x2, y1, y2;
	x1 = index[0];
	x2 = index[0] + template_width - 1;
	y1 = indexY[0];
	y2 = indexY[0] + template_height - 1;
	MarkAndSave(argv[1], x1, y1, x2, y2, argv[3]);
	printf("Percorso immagine risultante: %s\n", argv[3]);
	// Release host memory (these were leaked before)
	free(origine);
	free(templat);
	free(output);
	free(index);
	free(indexY);
	clock_t end = clock();
	printf("Tempo di esecuzione = %f secondi \n", ((double)(end - start)) / CLOCKS_PER_SEC);
} |
85bd7663792fa5e89841fbe11a9db9fb379c9c92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <list>
#include <utility>
#include <math.h>
#include <limits>
#include <ctime>
#define N 20000
using namespace std;
/* Sums N = 20000 ints using a single 1024-thread block.
 * Phase 1: each thread serially accumulates a strided slice into sdata[tid]
 * (threads 0..543 take 20 elements, 544..1023 take 19: 544*20 + 480*19 = 20000).
 * Phase 2: interleaved tree reduction over sdata; thread 0 writes the total.
 * NOTE(review): despite the name, sdata is a GLOBAL-memory buffer passed in
 * by the host (allocated with hipMalloc and zero-initialised there), not
 * __shared__ memory.  The constants 544/20/19/1024 are hard-wired to N=20000
 * and blockDim.x=1024 - confirm before changing either. */
__global__ void reduce(int *g_idata, int* g_odata, int* sdata)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
/* phase 1: serial accumulation of each thread's strided slice */
if(tid<544){
for(int j=0; j<20 ; j++){
sdata[tid] = sdata[tid] + g_idata[i+1024*j];
}
}else{
for(int j=0; j<19 ; j++){
sdata[tid] = sdata[tid] + g_idata[i+1024*j];
}
}
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
if ((tid % (2*s)) == 0){
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0){
g_odata[blockIdx.x] = sdata[tid];
}
}
/* Build a synthetic input of N consecutive integers 0..N-1 (caller frees). */
int* load_data1(){
	int* data = new int[N];
	for(int idx = 0; idx < N; ++idx){
		data[idx] = idx;
	}
	return data;
}
/* Read up to N whitespace-separated integers from a text file into a
 * new[]-allocated buffer (caller releases with delete[]).
 * Fixes vs. the original: a failed fopen is now reported instead of
 * dereferencing NULL, and reading stops at N entries so a longer file can no
 * longer overrun the buffer. */
int* load_data(const char* input_file){
	int* answer = new int[N];
	FILE* fp = fopen(input_file,"r");
	if (fp == NULL) {
		printf("Cannot open input file: %s\n", input_file);
		exit(EXIT_FAILURE);
	}
	int element;
	int i = 0;
	// stop at N entries so a longer file cannot overflow `answer`
	while( i < N && fscanf(fp, "%d", &element) != EOF )
	{
		answer[i] = element;
		i++;
	}
	fclose(fp);
	return(answer);
}
/* Sums the N integers in reduce_data.txt on the GPU (single block) and
 * reports the kernel time measured with HIP events.
 * Fixes vs. the original: the h_idata malloc that was immediately
 * overwritten (leaked) by load_data's pointer is gone, load_data's new[]
 * buffer is released with delete[] (it was free()'d before, which is UB),
 * and the timing events are destroyed. */
int main(){
	hipEvent_t start, stop;
	float time;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	int* h_idata;
	int* h_odata; /* host data*/
	int* d_idata;
	int* d_odata; /* device data*/
	int* subh_xdata;
	int* subd_xdata;
	int numThreadsperBlock = 1024;
	int numBlocks = 1;
	int size1 = N * sizeof( int );
	int size2 = numBlocks * sizeof(int);
	int size3 = numThreadsperBlock*sizeof(int);
	hipMalloc( (void **) &d_idata, size1 );
	hipMalloc( (void **) &d_odata, size2);
	hipMalloc( (void **) &subd_xdata, size3);
	h_odata = (int *)malloc( size2);
	subh_xdata = (int *)malloc( size3);
	const char* name = "reduce_data.txt";
	// load_data allocates the input buffer with new[]
	h_idata = load_data(name);
	// the per-thread accumulators read by the kernel must start at zero
	for(int i=0; i<numThreadsperBlock; i++){
		subh_xdata[i] = 0;
	}
	/* copying inputs to device memory */
	hipMemcpy(d_idata, h_idata, size1, hipMemcpyHostToDevice) ;
	hipMemcpy(d_odata, h_odata, size2, hipMemcpyHostToDevice) ;
	hipMemcpy(subd_xdata, subh_xdata, size3, hipMemcpyHostToDevice) ;
	dim3 dimBlock(numThreadsperBlock, 1, 1); dim3 dimGrid(numBlocks, 1, 1);
	hipEventRecord(start, 0);
	hipLaunchKernelGGL(( reduce), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_idata, d_odata, subd_xdata);
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	int result;
	hipMemcpy( h_odata, d_odata, size2, hipMemcpyDeviceToHost );
	// numBlocks == 1, so size2 == sizeof(int) and this reads the single total
	hipMemcpy( &result, d_odata, size2, hipMemcpyDeviceToHost );
	cout << "sum is " << result << endl;
	hipEventElapsedTime(&time, start, stop);
	printf ("Time for the kernel: %f ms\n", time);
	hipEventDestroy(start);
	hipEventDestroy(stop);
	delete[] h_idata; // allocated with new[] in load_data
	free(h_odata);
	free(subh_xdata);
	hipFree(d_idata);
	hipFree(d_odata);
	hipFree(subd_xdata);
} | 85bd7663792fa5e89841fbe11a9db9fb379c9c92.cu | #include <iostream>
#include <string.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <list>
#include <utility>
#include <math.h>
#include <limits>
#include <ctime>
#define N 20000
using namespace std;
/* Sums N = 20000 ints using a single 1024-thread block.
 * Phase 1: each thread serially accumulates a strided slice into sdata[tid]
 * (threads 0..543 take 20 elements, 544..1023 take 19: 544*20 + 480*19 = 20000).
 * Phase 2: interleaved tree reduction over sdata; thread 0 writes the total.
 * NOTE(review): despite the name, sdata is a GLOBAL-memory buffer passed in
 * by the host (allocated with cudaMalloc and zero-initialised there), not
 * __shared__ memory.  The constants 544/20/19/1024 are hard-wired to N=20000
 * and blockDim.x=1024 - confirm before changing either. */
__global__ void reduce(int *g_idata, int* g_odata, int* sdata)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
/* phase 1: serial accumulation of each thread's strided slice */
if(tid<544){
for(int j=0; j<20 ; j++){
sdata[tid] = sdata[tid] + g_idata[i+1024*j];
}
}else{
for(int j=0; j<19 ; j++){
sdata[tid] = sdata[tid] + g_idata[i+1024*j];
}
}
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
if ((tid % (2*s)) == 0){
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0){
g_odata[blockIdx.x] = sdata[tid];
}
}
/* Build a synthetic input of N consecutive integers 0..N-1 (caller frees). */
int* load_data1(){
	int* data = new int[N];
	for(int idx = 0; idx < N; ++idx){
		data[idx] = idx;
	}
	return data;
}
/* Read up to N whitespace-separated integers from a text file into a
 * new[]-allocated buffer (caller releases with delete[]).
 * Fixes vs. the original: a failed fopen is now reported instead of
 * dereferencing NULL, and reading stops at N entries so a longer file can no
 * longer overrun the buffer. */
int* load_data(const char* input_file){
	int* answer = new int[N];
	FILE* fp = fopen(input_file,"r");
	if (fp == NULL) {
		printf("Cannot open input file: %s\n", input_file);
		exit(EXIT_FAILURE);
	}
	int element;
	int i = 0;
	// stop at N entries so a longer file cannot overflow `answer`
	while( i < N && fscanf(fp, "%d", &element) != EOF )
	{
		answer[i] = element;
		i++;
	}
	fclose(fp);
	return(answer);
}
/* Sums the N integers in reduce_data.txt on the GPU (single block) and
 * reports the kernel time measured with CUDA events.
 * Fixes vs. the original: the h_idata malloc that was immediately
 * overwritten (leaked) by load_data's pointer is gone, load_data's new[]
 * buffer is released with delete[] (it was free()'d before, which is UB),
 * and the timing events are destroyed. */
int main(){
	cudaEvent_t start, stop;
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	int* h_idata;
	int* h_odata; /* host data*/
	int* d_idata;
	int* d_odata; /* device data*/
	int* subh_xdata;
	int* subd_xdata;
	int numThreadsperBlock = 1024;
	int numBlocks = 1;
	int size1 = N * sizeof( int );
	int size2 = numBlocks * sizeof(int);
	int size3 = numThreadsperBlock*sizeof(int);
	cudaMalloc( (void **) &d_idata, size1 );
	cudaMalloc( (void **) &d_odata, size2);
	cudaMalloc( (void **) &subd_xdata, size3);
	h_odata = (int *)malloc( size2);
	subh_xdata = (int *)malloc( size3);
	const char* name = "reduce_data.txt";
	// load_data allocates the input buffer with new[]
	h_idata = load_data(name);
	// the per-thread accumulators read by the kernel must start at zero
	for(int i=0; i<numThreadsperBlock; i++){
		subh_xdata[i] = 0;
	}
	/* copying inputs to device memory */
	cudaMemcpy(d_idata, h_idata, size1, cudaMemcpyHostToDevice) ;
	cudaMemcpy(d_odata, h_odata, size2, cudaMemcpyHostToDevice) ;
	cudaMemcpy(subd_xdata, subh_xdata, size3, cudaMemcpyHostToDevice) ;
	dim3 dimBlock(numThreadsperBlock, 1, 1); dim3 dimGrid(numBlocks, 1, 1);
	cudaEventRecord(start, 0);
	reduce<<< dimGrid, dimBlock >>>(d_idata, d_odata, subd_xdata);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	int result;
	cudaMemcpy( h_odata, d_odata, size2, cudaMemcpyDeviceToHost );
	// numBlocks == 1, so size2 == sizeof(int) and this reads the single total
	cudaMemcpy( &result, d_odata, size2, cudaMemcpyDeviceToHost );
	cout << "sum is " << result << endl;
	cudaEventElapsedTime(&time, start, stop);
	printf ("Time for the kernel: %f ms\n", time);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	delete[] h_idata; // allocated with new[] in load_data
	free(h_odata);
	free(subh_xdata);
	cudaFree(d_idata);
	cudaFree(d_odata);
	cudaFree(subd_xdata);
} |
6185921978539a5b3ebb4ac828d1a857a651bcd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/hybrid_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/matrix/ell.hpp>
#include "core/components/fill_array.hpp"
#include "core/components/prefix_sum.hpp"
#include "core/matrix/coo_kernels.hpp"
#include "core/matrix/ell_kernels.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/atomic.cuh"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/format_conversion.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/segment_scan.cuh"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Hybrid matrix format namespace.
*
* @ingroup hybrid
*/
namespace hybrid {
constexpr int default_block_size = 512;
constexpr int warps_in_block = 4;
#include "common/cuda_hip/matrix/hybrid_kernels.hpp.inc"
// Hybrid -> Dense conversion is not provided for this backend: the
// GKO_NOT_IMPLEMENTED body raises Ginkgo's NotImplemented exception when
// called.  The macro below instantiates the symbol for every supported
// (ValueType, IndexType) pair.
template <typename ValueType, typename IndexType>
void convert_to_dense(std::shared_ptr<const CudaExecutor> exec,
const matrix::Hybrid<ValueType, IndexType>* source,
matrix::Dense<ValueType>* result) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_HYBRID_CONVERT_TO_DENSE_KERNEL);
// Converts a Hybrid (ELL + COO) matrix into CSR on the GPU in four steps:
//   1. turn the COO row indices into row pointers (coo_offset),
//   2. count the per-row nonzeros of the ELL part and of the COO part,
//   3. add the two counts and prefix-sum them into the CSR row pointers,
//   4. scatter values/column indices of both parts into the CSR arrays.
// `result` must already be sized for the combined nonzero count.
template <typename ValueType, typename IndexType>
void convert_to_csr(std::shared_ptr<const CudaExecutor> exec,
const matrix::Hybrid<ValueType, IndexType>* source,
matrix::Csr<ValueType, IndexType>* result)
{
const auto num_rows = source->get_size()[0];
auto coo_offset = Array<IndexType>(exec, num_rows + 1);
auto coo_val = source->get_const_coo_values();
auto coo_col = source->get_const_coo_col_idxs();
auto coo_row = source->get_const_coo_row_idxs();
auto ell_val = source->get_const_ell_values();
auto ell_col = source->get_const_ell_col_idxs();
const auto stride = source->get_ell_stride();
const auto max_nnz_per_row = source->get_ell_num_stored_elements_per_row();
const auto coo_num_stored_elements = source->get_coo_num_stored_elements();
// Compute the row offset of Coo without zeros
size_type grid_num = ceildiv(coo_num_stored_elements, default_block_size);
hipLaunchKernelGGL(( coo::kernel::convert_row_idxs_to_ptrs), dim3(grid_num), dim3(default_block_size), 0, 0,
as_cuda_type(coo_row), coo_num_stored_elements,
as_cuda_type(coo_offset.get_data()), num_rows + 1);
// Compute the row ptrs of Csr: start from the ELL per-row counts ...
auto row_ptrs = result->get_row_ptrs();
auto coo_row_ptrs = Array<IndexType>(exec, num_rows);
components::fill_array(exec, row_ptrs, num_rows + 1, zero<IndexType>());
grid_num = ceildiv(num_rows, warps_in_block);
hipLaunchKernelGGL(( ell::kernel::count_nnz_per_row), dim3(grid_num), dim3(default_block_size), 0, 0,
num_rows, max_nnz_per_row, stride, as_cuda_type(ell_val),
as_cuda_type(row_ptrs));
// ... then count the COO part per row (kernel only launched when the COO
// part is non-empty)
components::fill_array(exec, coo_row_ptrs.get_data(), num_rows,
zero<IndexType>());
auto nwarps =
coo::host_kernel::calculate_nwarps(exec, coo_num_stored_elements);
if (nwarps > 0) {
int num_lines =
ceildiv(coo_num_stored_elements, nwarps * config::warp_size);
const dim3 coo_block(config::warp_size, warps_in_block, 1);
const dim3 coo_grid(ceildiv(nwarps, warps_in_block), 1);
hipLaunchKernelGGL(( kernel::count_coo_row_nnz), dim3(coo_grid), dim3(coo_block), 0, 0,
coo_num_stored_elements, num_lines, as_cuda_type(coo_val),
as_cuda_type(coo_row), as_cuda_type(coo_row_ptrs.get_data()));
}
// combine ELL + COO counts and prefix-sum into CSR row pointers
hipLaunchKernelGGL(( kernel::add), dim3(grid_num), dim3(default_block_size), 0, 0,
num_rows, as_cuda_type(row_ptrs),
as_cuda_type(coo_row_ptrs.get_const_data()));
components::prefix_sum(exec, row_ptrs, num_rows + 1);
// Fill the value
grid_num = ceildiv(num_rows, default_block_size);
hipLaunchKernelGGL(( kernel::fill_in_csr), dim3(grid_num), dim3(default_block_size), 0, 0,
num_rows, max_nnz_per_row, stride, as_cuda_type(ell_val),
as_cuda_type(ell_col), as_cuda_type(coo_val), as_cuda_type(coo_col),
as_cuda_type(coo_offset.get_const_data()), as_cuda_type(row_ptrs),
as_cuda_type(result->get_col_idxs()),
as_cuda_type(result->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_HYBRID_CONVERT_TO_CSR_KERNEL);
// Counts the stored nonzeros of a Hybrid matrix: the ELL part is delegated to
// ell::count_nonzeros, the COO part is counted per row on the device and then
// reduced; *result receives the sum of both.
template <typename ValueType, typename IndexType>
void count_nonzeros(std::shared_ptr<const CudaExecutor> exec,
const matrix::Hybrid<ValueType, IndexType>* source,
size_type* result)
{
size_type ell_nnz = 0;
size_type coo_nnz = 0;
ell::count_nonzeros(exec, source->get_ell(), &ell_nnz);
auto nnz = source->get_coo_num_stored_elements();
auto nwarps = coo::host_kernel::calculate_nwarps(exec, nnz);
if (nwarps > 0) {
// Each warp walks num_lines warp_size-wide chunks of the COO entries.
int num_lines = ceildiv(nnz, nwarps * config::warp_size);
const dim3 coo_block(config::warp_size, warps_in_block, 1);
const dim3 coo_grid(ceildiv(nwarps, warps_in_block), 1);
const auto num_rows = source->get_size()[0];
auto nnz_per_row = Array<IndexType>(exec, num_rows);
components::fill_array(exec, nnz_per_row.get_data(), num_rows,
zero<IndexType>());
hipLaunchKernelGGL(( kernel::count_coo_row_nnz), dim3(coo_grid), dim3(coo_block), 0, 0,
nnz, num_lines, as_cuda_type(source->get_coo()->get_const_values()),
as_cuda_type(source->get_coo()->get_const_row_idxs()),
as_cuda_type(nnz_per_row.get_data()));
// Sum the per-row counts into a single scalar on the host side.
coo_nnz =
reduce_add_array(exec, num_rows, nnz_per_row.get_const_data());
}
*result = ell_nnz + coo_nnz;
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_HYBRID_COUNT_NONZEROS_KERNEL);
} // namespace hybrid
} // namespace cuda
} // namespace kernels
} // namespace gko
| 6185921978539a5b3ebb4ac828d1a857a651bcd3.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/hybrid_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/matrix/ell.hpp>
#include "core/components/fill_array.hpp"
#include "core/components/prefix_sum.hpp"
#include "core/matrix/coo_kernels.hpp"
#include "core/matrix/ell_kernels.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/atomic.cuh"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/format_conversion.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/segment_scan.cuh"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Hybrid matrix format namespace.
*
* @ingroup hybrid
*/
namespace hybrid {
constexpr int default_block_size = 512;
constexpr int warps_in_block = 4;
#include "common/cuda_hip/matrix/hybrid_kernels.hpp.inc"
// Hybrid -> Dense conversion: declared only; GKO_NOT_IMPLEMENTED expands to a
// throwing stub, so this backend has no implementation yet.
template <typename ValueType, typename IndexType>
void convert_to_dense(std::shared_ptr<const CudaExecutor> exec,
const matrix::Hybrid<ValueType, IndexType>* source,
matrix::Dense<ValueType>* result) GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_HYBRID_CONVERT_TO_DENSE_KERNEL);
// Converts a Hybrid (ELL + COO) matrix into CSR on the device.
// Pipeline: (1) turn COO row indices into row offsets, (2) count the
// explicitly stored entries each part contributes per row, (3) prefix-sum
// those counts into CSR row pointers, (4) scatter values and column indices.
template <typename ValueType, typename IndexType>
void convert_to_csr(std::shared_ptr<const CudaExecutor> exec,
const matrix::Hybrid<ValueType, IndexType>* source,
matrix::Csr<ValueType, IndexType>* result)
{
const auto num_rows = source->get_size()[0];
auto coo_offset = Array<IndexType>(exec, num_rows + 1);
auto coo_val = source->get_const_coo_values();
auto coo_col = source->get_const_coo_col_idxs();
auto coo_row = source->get_const_coo_row_idxs();
auto ell_val = source->get_const_ell_values();
auto ell_col = source->get_const_ell_col_idxs();
const auto stride = source->get_ell_stride();
const auto max_nnz_per_row = source->get_ell_num_stored_elements_per_row();
const auto coo_num_stored_elements = source->get_coo_num_stored_elements();
// Compute the row offset of Coo without zeros
size_type grid_num = ceildiv(coo_num_stored_elements, default_block_size);
coo::kernel::convert_row_idxs_to_ptrs<<<grid_num, default_block_size>>>(
as_cuda_type(coo_row), coo_num_stored_elements,
as_cuda_type(coo_offset.get_data()), num_rows + 1);
// Compute the row ptrs of Csr
auto row_ptrs = result->get_row_ptrs();
auto coo_row_ptrs = Array<IndexType>(exec, num_rows);
components::fill_array(exec, row_ptrs, num_rows + 1, zero<IndexType>());
// Per-row count of the stored ELL entries, accumulated into row_ptrs.
grid_num = ceildiv(num_rows, warps_in_block);
ell::kernel::count_nnz_per_row<<<grid_num, default_block_size>>>(
num_rows, max_nnz_per_row, stride, as_cuda_type(ell_val),
as_cuda_type(row_ptrs));
components::fill_array(exec, coo_row_ptrs.get_data(), num_rows,
zero<IndexType>());
auto nwarps =
coo::host_kernel::calculate_nwarps(exec, coo_num_stored_elements);
if (nwarps > 0) {
// Per-row count of the stored COO entries; each warp walks num_lines
// warp_size-wide chunks of the entry list.
int num_lines =
ceildiv(coo_num_stored_elements, nwarps * config::warp_size);
const dim3 coo_block(config::warp_size, warps_in_block, 1);
const dim3 coo_grid(ceildiv(nwarps, warps_in_block), 1);
kernel::count_coo_row_nnz<<<coo_grid, coo_block>>>(
coo_num_stored_elements, num_lines, as_cuda_type(coo_val),
as_cuda_type(coo_row), as_cuda_type(coo_row_ptrs.get_data()));
}
// NOTE(review): grid_num is still ceildiv(num_rows, warps_in_block) from the
// count_nnz_per_row launch above, so with default_block_size threads per
// block this over-provisions threads; kernel::add presumably bounds-checks
// against num_rows — confirm.
kernel::add<<<grid_num, default_block_size>>>(
num_rows, as_cuda_type(row_ptrs),
as_cuda_type(coo_row_ptrs.get_const_data()));
components::prefix_sum(exec, row_ptrs, num_rows + 1);
// Fill the value
grid_num = ceildiv(num_rows, default_block_size);
kernel::fill_in_csr<<<grid_num, default_block_size>>>(
num_rows, max_nnz_per_row, stride, as_cuda_type(ell_val),
as_cuda_type(ell_col), as_cuda_type(coo_val), as_cuda_type(coo_col),
as_cuda_type(coo_offset.get_const_data()), as_cuda_type(row_ptrs),
as_cuda_type(result->get_col_idxs()),
as_cuda_type(result->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_HYBRID_CONVERT_TO_CSR_KERNEL);
// Counts the stored nonzeros of a Hybrid matrix: the ELL part is delegated to
// ell::count_nonzeros, the COO part is counted per row on the device and then
// reduced; *result receives the sum of both.
template <typename ValueType, typename IndexType>
void count_nonzeros(std::shared_ptr<const CudaExecutor> exec,
const matrix::Hybrid<ValueType, IndexType>* source,
size_type* result)
{
size_type ell_nnz = 0;
size_type coo_nnz = 0;
ell::count_nonzeros(exec, source->get_ell(), &ell_nnz);
auto nnz = source->get_coo_num_stored_elements();
auto nwarps = coo::host_kernel::calculate_nwarps(exec, nnz);
if (nwarps > 0) {
// Each warp walks num_lines warp_size-wide chunks of the COO entries.
int num_lines = ceildiv(nnz, nwarps * config::warp_size);
const dim3 coo_block(config::warp_size, warps_in_block, 1);
const dim3 coo_grid(ceildiv(nwarps, warps_in_block), 1);
const auto num_rows = source->get_size()[0];
auto nnz_per_row = Array<IndexType>(exec, num_rows);
components::fill_array(exec, nnz_per_row.get_data(), num_rows,
zero<IndexType>());
kernel::count_coo_row_nnz<<<coo_grid, coo_block>>>(
nnz, num_lines, as_cuda_type(source->get_coo()->get_const_values()),
as_cuda_type(source->get_coo()->get_const_row_idxs()),
as_cuda_type(nnz_per_row.get_data()));
// Sum the per-row counts into a single scalar on the host side.
coo_nnz =
reduce_add_array(exec, num_rows, nnz_per_row.get_const_data());
}
*result = ell_nnz + coo_nnz;
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_HYBRID_COUNT_NONZEROS_KERNEL);
} // namespace hybrid
} // namespace cuda
} // namespace kernels
} // namespace gko
|
7711cb5fbd29278fe078f6ccafc31dbe2b561c0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_fc.h"
#include "saber/funcs/calibrate.h"
#include "sass_funcs.h"
namespace anakin{
namespace saber{
// Element-wise bias addition: dout[i] += bias[i % output_size] for i in [0, n).
// One thread per element over a 1-D grid; threads past n exit early.
template <typename dtype>
__global__ void add_bias(int n, int output_size, const dtype* bias, dtype* dout) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n) {
        return;
    }
    dout[gid] += bias[gid % output_size];
}
// (Re)derives the GEMM problem size for the FP32 fully-connected layer and
// re-initializes the GEMM object:
//   _M = product of input dims before param.axis (batch rows),
//   _K = product of input dims from param.axis on (input features),
//   _N = number of outputs (inferred from the weight size if not given).
template <>
SaberStatus SaberFc<NV, AK_FLOAT>::create(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param, Context<NV>& ctx){
if (!(&ctx == this->_ctx)) {
this->_ctx = &ctx;
}
Shape shape_out = inputs[0]->valid_shape();
_M = inputs[0]->count_valid(0, param.axis);
_K = inputs[0]->count_valid(param.axis, inputs[0]->dims());
_N = param.num_output;
_flag_trans_weights = param.is_transpose_weights;
// Infer the output count from the weight size when it was not specified.
if (_N <= 0) {
int weight_size = param.weights->valid_size();
_N = weight_size / _K;
}
//! weights dims must be in h and w
_gemm->init(false, !_flag_trans_weights, _M, _N, _K, *_ctx);
return SaberSuccess;
}
// Selects the FP32 GEMM backend by GPU architecture — the hand-written SABER
// implementation on arch 50/61, the vendor implementation otherwise — then
// forwards to create() to size and initialize it.
// NOTE(review): _gemm is allocated with new and not released in this block;
// presumably the destructor frees it — confirm.
template <>
SaberStatus SaberFc<NV, AK_FLOAT>::init(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param, Context<NV> &ctx) {
// get context
this->_ctx = &ctx;
int generate_arch = Env<NV>::cur_env()[_ctx->get_device_id()]._info._generate_arch;
bool arch_check = (generate_arch == 50) || (generate_arch == 61);
if (arch_check) {
_gemm = new Gemm<NV, SABER_IMPL, float, float>;
} else {
_gemm = new Gemm<NV, VENDER_IMPL, float, float>;
}
return create(inputs, outputs, param, ctx);
}
// INT8 variant of create(): derives the same _M/_K/_N GEMM dimensions as the
// FP32 path and re-initializes the int8-in/float-out GEMM object.
template <>
SaberStatus SaberFc<NV, AK_INT8>::create(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param, Context<NV>& ctx){
if (!(&ctx == this->_ctx)) {
this->_ctx = &ctx;
}
Shape shape_out = inputs[0]->valid_shape();
_M = inputs[0]->count_valid(0, param.axis);
_K = inputs[0]->count_valid(param.axis, inputs[0]->dims());
_N = param.num_output;
_flag_trans_weights = param.is_transpose_weights;
// Infer the output count from the weight size when it was not specified.
if (_N <= 0) {
int weight_size = param.weights->valid_size();
_N = weight_size / _K;
}
//! weights dims must be in h and w
_gemm_s8f32->init(false, !_flag_trans_weights, _M, _N, _K, *_ctx);
return SaberSuccess;
}
// INT8 initialization: supported only on arch 61 (fatal otherwise). When the
// weights arrive as FP32 they are quantized once here: a scale is computed
// from a host copy, recorded on both tensors, and the weights are
// calibrated/flattened into the int8 _trans_weight buffer, with an explicit
// sync so the buffer is ready before any dispatch.
template <>
SaberStatus SaberFc<NV, AK_INT8>::init(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param, Context<NV> &ctx) {
// get context
this->_ctx = &ctx;
int generate_arch = Env<NV>::cur_env()[_ctx->get_device_id()]._info._generate_arch;
bool arch_check = generate_arch == 61;
if (arch_check) {
_gemm_s8f32 = new Gemm<NV, VENDER_IMPL, char, float>;
if (param.weights->get_dtype() == AK_FLOAT) {
// Quantize the FP32 weights to int8 on the fly.
Tensor<NVHX86> _host_weight;
_trans_weight.re_alloc(param.weights->valid_shape(), AK_INT8);
_host_weight.re_alloc(param.weights->valid_shape(), AK_FLOAT);
_host_weight.copy_from(*param.weights);
std::vector<float> scale;
get_tensor_scale(scale, _host_weight, 0, false);
param.weights->set_scale(scale);
_trans_weight.set_scale(scale);
flatten_calibrate<NV, char, float>(_trans_weight, *param.weights, ctx);
// Block until the calibrated weights are materialized on the device.
_trans_weight.record_event(ctx.get_compute_stream());
_trans_weight.sync();
}
} else {
LOG(FATAL) << "not support this arch!! ";
}
return create(inputs, outputs, param, ctx);
}
// INT8 forward pass: relayouts the int8 input from NCHW/c4 to plain NCHW,
// runs the int8-in/float-out GEMM with alpha folding the input and weight
// quantization scales, then adds the bias (if any) with an element-wise
// kernel on the compute stream.
template <>
SaberStatus SaberFc<NV, AK_INT8>::dispatch(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs, FcParam<NV>& param) {
hipStream_t stream = this->_ctx->get_compute_stream();
const char *din = (const char *)inputs[0]->data();
float *dout = (float *)outputs[0]->mutable_data();
// Use the pre-quantized weights when the originals were FP32 (see init()).
const char *weight = nullptr;
if (param.weights->get_dtype() == AK_INT8) {
weight = (const char *)param.weights->data();
} else {
weight = (const char *)_trans_weight.data();
}
const float *bias = nullptr;
bool bias_term = param.bias != nullptr;
if (bias_term) {
bias = (const float *)param.bias->data();
}
// Relayout the int8 input before the GEMM consumes it.
_inner_tensor.re_alloc(inputs[0]->valid_shape(), AK_INT8);
layout_trans_nchwc4_2_nchw(_inner_tensor, *inputs[0],
inputs[0]->get_scale()[0], *_ctx);
din = (const char*)_inner_tensor.data();
float beta = 0.f;
// Fold the quantization scales into the GEMM's alpha.
float alpha = 1.f;
if (param.weights->get_scale().size() == 1) {
CHECK_GE(inputs[0]->get_scale().size(), 1);
alpha = inputs[0]->get_scale()[0] * param.weights->get_scale()[0];
}
// int8 output is not supported here; the commented code shows the intent.
if (outputs[0]->get_dtype() == AK_INT8) {
LOG(FATAL) << " this is not right!";
// CHECK_GE(outputs[0]->get_scale().size(), 1);
// alpha /= outputs[0]->get_scale()[0];
}
_gemm_s8f32->dispatch(alpha, beta, din, weight, dout);
if (bias_term) {
int total_size = _M * _N;
hipLaunchKernelGGL(( add_bias<float>), dim3(CUDA_GET_BLOCKS(total_size)), dim3(CUDA_NUM_THREADS), 0, stream, \
total_size, _N, bias, dout);
}
return SaberSuccess;
}
// FP32 forward pass: dout = din * W via the GEMM selected in init(), followed
// by an optional element-wise bias add launched on the compute stream.
template <>
SaberStatus SaberFc<NV, AK_FLOAT>::dispatch(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param) {
hipStream_t stream = this->_ctx->get_compute_stream();
const float *din = (const float *)inputs[0]->data();
float *dout = (float *)outputs[0]->mutable_data();
const float *weight = (float *)param.weights->data();
const float *bias = nullptr;
bool bias_term = param.bias != nullptr;
if (bias_term) {
bias = (const float *)param.bias->data();
}
float alpha = 1.f;
float beta = 0.f;
_gemm->dispatch(alpha, beta, din, weight, dout);
if (bias_term) {
int total_size = _M * _N;
hipLaunchKernelGGL(( add_bias<float>), dim3(CUDA_GET_BLOCKS(total_size)), dim3(CUDA_NUM_THREADS), 0, stream, \
total_size, _N, bias, dout);
}
return SaberSuccess;
}
template class SaberFc<NV, AK_INT8>;
template class SaberFc<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberFc, FcParam, NV, AK_HALF);
} //namespace anakin
} //namespace anakin
| 7711cb5fbd29278fe078f6ccafc31dbe2b561c0d.cu | #include "saber/funcs/impl/cuda/saber_fc.h"
#include "saber/funcs/calibrate.h"
#include "sass_funcs.h"
namespace anakin{
namespace saber{
// Element-wise bias addition: dout[i] += bias[i % output_size] for i in [0, n).
// One thread per element over a 1-D grid; threads past n exit early.
template <typename dtype>
__global__ void add_bias(int n, int output_size, const dtype* bias, dtype* dout) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n) {
        return;
    }
    dout[gid] += bias[gid % output_size];
}
// (Re)derives the GEMM problem size for the FP32 fully-connected layer and
// re-initializes the GEMM object:
//   _M = product of input dims before param.axis (batch rows),
//   _K = product of input dims from param.axis on (input features),
//   _N = number of outputs (inferred from the weight size if not given).
template <>
SaberStatus SaberFc<NV, AK_FLOAT>::create(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param, Context<NV>& ctx){
if (!(&ctx == this->_ctx)) {
this->_ctx = &ctx;
}
Shape shape_out = inputs[0]->valid_shape();
_M = inputs[0]->count_valid(0, param.axis);
_K = inputs[0]->count_valid(param.axis, inputs[0]->dims());
_N = param.num_output;
_flag_trans_weights = param.is_transpose_weights;
// Infer the output count from the weight size when it was not specified.
if (_N <= 0) {
int weight_size = param.weights->valid_size();
_N = weight_size / _K;
}
//! weights dims must be in h and w
_gemm->init(false, !_flag_trans_weights, _M, _N, _K, *_ctx);
return SaberSuccess;
}
// Selects the FP32 GEMM backend by GPU architecture — the hand-written SABER
// implementation on arch 50/61, the vendor implementation otherwise — then
// forwards to create() to size and initialize it.
// NOTE(review): _gemm is allocated with new and not released in this block;
// presumably the destructor frees it — confirm.
template <>
SaberStatus SaberFc<NV, AK_FLOAT>::init(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param, Context<NV> &ctx) {
// get context
this->_ctx = &ctx;
int generate_arch = Env<NV>::cur_env()[_ctx->get_device_id()]._info._generate_arch;
bool arch_check = (generate_arch == 50) || (generate_arch == 61);
if (arch_check) {
_gemm = new Gemm<NV, SABER_IMPL, float, float>;
} else {
_gemm = new Gemm<NV, VENDER_IMPL, float, float>;
}
return create(inputs, outputs, param, ctx);
}
// INT8 variant of create(): derives the same _M/_K/_N GEMM dimensions as the
// FP32 path and re-initializes the int8-in/float-out GEMM object.
template <>
SaberStatus SaberFc<NV, AK_INT8>::create(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param, Context<NV>& ctx){
if (!(&ctx == this->_ctx)) {
this->_ctx = &ctx;
}
Shape shape_out = inputs[0]->valid_shape();
_M = inputs[0]->count_valid(0, param.axis);
_K = inputs[0]->count_valid(param.axis, inputs[0]->dims());
_N = param.num_output;
_flag_trans_weights = param.is_transpose_weights;
// Infer the output count from the weight size when it was not specified.
if (_N <= 0) {
int weight_size = param.weights->valid_size();
_N = weight_size / _K;
}
//! weights dims must be in h and w
_gemm_s8f32->init(false, !_flag_trans_weights, _M, _N, _K, *_ctx);
return SaberSuccess;
}
// INT8 initialization: supported only on arch 61 (fatal otherwise). When the
// weights arrive as FP32 they are quantized once here: a scale is computed
// from a host copy, recorded on both tensors, and the weights are
// calibrated/flattened into the int8 _trans_weight buffer, with an explicit
// sync so the buffer is ready before any dispatch.
template <>
SaberStatus SaberFc<NV, AK_INT8>::init(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param, Context<NV> &ctx) {
// get context
this->_ctx = &ctx;
int generate_arch = Env<NV>::cur_env()[_ctx->get_device_id()]._info._generate_arch;
bool arch_check = generate_arch == 61;
if (arch_check) {
_gemm_s8f32 = new Gemm<NV, VENDER_IMPL, char, float>;
if (param.weights->get_dtype() == AK_FLOAT) {
// Quantize the FP32 weights to int8 on the fly.
Tensor<NVHX86> _host_weight;
_trans_weight.re_alloc(param.weights->valid_shape(), AK_INT8);
_host_weight.re_alloc(param.weights->valid_shape(), AK_FLOAT);
_host_weight.copy_from(*param.weights);
std::vector<float> scale;
get_tensor_scale(scale, _host_weight, 0, false);
param.weights->set_scale(scale);
_trans_weight.set_scale(scale);
flatten_calibrate<NV, char, float>(_trans_weight, *param.weights, ctx);
// Block until the calibrated weights are materialized on the device.
_trans_weight.record_event(ctx.get_compute_stream());
_trans_weight.sync();
}
} else {
LOG(FATAL) << "not support this arch!! ";
}
return create(inputs, outputs, param, ctx);
}
// INT8 forward pass: relayouts the int8 input from NCHW/c4 to plain NCHW,
// runs the int8-in/float-out GEMM with alpha folding the input and weight
// quantization scales, then adds the bias (if any) with an element-wise
// kernel on the compute stream.
template <>
SaberStatus SaberFc<NV, AK_INT8>::dispatch(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs, FcParam<NV>& param) {
cudaStream_t stream = this->_ctx->get_compute_stream();
const char *din = (const char *)inputs[0]->data();
float *dout = (float *)outputs[0]->mutable_data();
// Use the pre-quantized weights when the originals were FP32 (see init()).
const char *weight = nullptr;
if (param.weights->get_dtype() == AK_INT8) {
weight = (const char *)param.weights->data();
} else {
weight = (const char *)_trans_weight.data();
}
const float *bias = nullptr;
bool bias_term = param.bias != nullptr;
if (bias_term) {
bias = (const float *)param.bias->data();
}
// Relayout the int8 input before the GEMM consumes it.
_inner_tensor.re_alloc(inputs[0]->valid_shape(), AK_INT8);
layout_trans_nchwc4_2_nchw(_inner_tensor, *inputs[0],
inputs[0]->get_scale()[0], *_ctx);
din = (const char*)_inner_tensor.data();
float beta = 0.f;
// Fold the quantization scales into the GEMM's alpha.
float alpha = 1.f;
if (param.weights->get_scale().size() == 1) {
CHECK_GE(inputs[0]->get_scale().size(), 1);
alpha = inputs[0]->get_scale()[0] * param.weights->get_scale()[0];
}
// int8 output is not supported here; the commented code shows the intent.
if (outputs[0]->get_dtype() == AK_INT8) {
LOG(FATAL) << " this is not right!";
// CHECK_GE(outputs[0]->get_scale().size(), 1);
// alpha /= outputs[0]->get_scale()[0];
}
_gemm_s8f32->dispatch(alpha, beta, din, weight, dout);
if (bias_term) {
int total_size = _M * _N;
add_bias<float><<<CUDA_GET_BLOCKS(total_size), CUDA_NUM_THREADS, 0, stream>>>\
(total_size, _N, bias, dout);
}
return SaberSuccess;
}
// FP32 forward pass: dout = din * W via the GEMM selected in init(), followed
// by an optional element-wise bias add launched on the compute stream.
template <>
SaberStatus SaberFc<NV, AK_FLOAT>::dispatch(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
FcParam<NV>& param) {
cudaStream_t stream = this->_ctx->get_compute_stream();
const float *din = (const float *)inputs[0]->data();
float *dout = (float *)outputs[0]->mutable_data();
const float *weight = (float *)param.weights->data();
const float *bias = nullptr;
bool bias_term = param.bias != nullptr;
if (bias_term) {
bias = (const float *)param.bias->data();
}
float alpha = 1.f;
float beta = 0.f;
_gemm->dispatch(alpha, beta, din, weight, dout);
if (bias_term) {
int total_size = _M * _N;
add_bias<float><<<CUDA_GET_BLOCKS(total_size), CUDA_NUM_THREADS, 0, stream>>>\
(total_size, _N, bias, dout);
}
return SaberSuccess;
}
template class SaberFc<NV, AK_INT8>;
template class SaberFc<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberFc, FcParam, NV, AK_HALF);
} //namespace anakin
} //namespace anakin
|
30c4575c79030ac6f0b9504fcd10d316b2f17345.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include "cube.cuh"
// Writes dev_b[i] = dev_a[i]^3, indexed by block only — assumes a launch of
// n blocks with a single thread each (<<<n, 1>>>).
__global__ void cube_core(int *dev_a, int *dev_b){
    const int i = blockIdx.x;
    const int v = dev_a[i];
    dev_b[i] = v * v * v;
}
// Fills result[i] = i^3 for i in [0, n) using the GPU (one block per element).
// result must point to at least n ints; n <= 0 is a no-op.
void cube(int result[], int n){
    if (n <= 0) {
        return;  // avoid zero-size allocations and an empty launch
    }
    int a[n];  // host input buffer: a[i] = i
    for (int i = 0; i < n; i++) {
        a[i] = i;
    }
    const size_t bytes = n * sizeof(int);
    int *dev_a = NULL;
    int *dev_b = NULL;
    hipMalloc((void**)&dev_a, bytes);
    hipMalloc((void**)&dev_b, bytes);
    // Bug fix: the original called hipMemset((void**)&dev_a, 0, bytes), which
    // clears the *host pointer variable* (stack memory) instead of the device
    // buffer and overruns the stack for n > 2. Memset the device buffers.
    hipMemset(dev_a, 0, bytes);
    hipMemset(dev_b, 0, bytes);
    hipMemcpy(dev_a, a, bytes, hipMemcpyHostToDevice);
    hipLaunchKernelGGL((cube_core), dim3(n), dim3(1), 0, 0, dev_a, dev_b);
    hipMemcpy(result, dev_b, bytes, hipMemcpyDeviceToHost);
    // Bug fix: the original leaked both device allocations.
    hipFree(dev_a);
    hipFree(dev_b);
}
| 30c4575c79030ac6f0b9504fcd10d316b2f17345.cu | #include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include "cube.cuh"
// Writes dev_b[i] = dev_a[i]^3, indexed by block only — assumes a launch of
// n blocks with a single thread each (<<<n, 1>>>).
__global__ void cube_core(int *dev_a, int *dev_b){
    const int i = blockIdx.x;
    const int v = dev_a[i];
    dev_b[i] = v * v * v;
}
// Fills result[i] = i^3 for i in [0, n) using the GPU (one block per element).
// result must point to at least n ints; n <= 0 is a no-op.
void cube(int result[], int n){
    if (n <= 0) {
        return;  // avoid zero-size allocations and an empty launch
    }
    int a[n];  // host input buffer: a[i] = i
    for (int i = 0; i < n; i++) {
        a[i] = i;
    }
    const size_t bytes = n * sizeof(int);
    int *dev_a = NULL;
    int *dev_b = NULL;
    cudaMalloc((void**)&dev_a, bytes);
    cudaMalloc((void**)&dev_b, bytes);
    // Bug fix: the original called cudaMemset((void**)&dev_a, 0, bytes), which
    // clears the *host pointer variable* (stack memory) instead of the device
    // buffer and overruns the stack for n > 2. Memset the device buffers.
    cudaMemset(dev_a, 0, bytes);
    cudaMemset(dev_b, 0, bytes);
    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cube_core<<<n, 1>>>(dev_a, dev_b);
    cudaMemcpy(result, dev_b, bytes, cudaMemcpyDeviceToHost);
    // Bug fix: the original leaked both device allocations.
    cudaFree(dev_a);
    cudaFree(dev_b);
}
|
2100c6d7ed10d77c843c3d85ba567235437e603a.hip | // !!! This is a file automatically generated by hipify!!!
#include "CUDA.h"
/* ------------------------ */
/* Allocates an uninitialized device array of `len` doubles and returns the
   device pointer (caller frees). NOTE(review): the hipMalloc status is not
   checked, so a failed allocation returns an unusable pointer. */
double *allocdev_double(int len){
double *dev_array;
hipMalloc( (void**)&dev_array, len*sizeof(double) );
return dev_array;
}
/* Same as allocdev_double, but allocates `len` ints. */
int *allocdev_int(int len){
int *dev_array;
hipMalloc( (void**)&dev_array, len*sizeof(int) );
return dev_array;
}
/* host2dev overloads: allocate a device array of `len` elements, copy
   host_array into it, and return the device pointer (caller frees).
   NOTE(review): hipMalloc/hipMemcpy return codes are not checked. */
hiprandState_t *host2dev(int len, hiprandState_t host_array[]){
hiprandState_t *dev_array;
hipMalloc( (void**)&dev_array, len*sizeof(hiprandState_t) );
hipMemcpy(dev_array, host_array, len*sizeof(hiprandState_t), hipMemcpyHostToDevice);
return dev_array;
}
double *host2dev(int len, double host_array[]){
double *dev_array;
hipMalloc( (void**)&dev_array, len*sizeof(double) );
hipMemcpy(dev_array, host_array, len*sizeof(double), hipMemcpyHostToDevice);
return dev_array;
}
int *host2dev(int len, int host_array[]){
int *dev_array;
hipMalloc( (void**)&dev_array, len*sizeof(int) );
hipMemcpy(dev_array, host_array, len*sizeof(int), hipMemcpyHostToDevice);
return dev_array;
}
unsigned long long *host2dev(int len, unsigned long long host_array[]){
unsigned long long *dev_array;
hipMalloc( (void**)&dev_array, len*sizeof(unsigned long long) );
hipMemcpy(dev_array, host_array, len*sizeof(unsigned long long), hipMemcpyHostToDevice);
return dev_array;
}
/* h2d overloads: copy `len` elements host -> device into an already
   allocated device buffer (no allocation happens here). */
void h2d(double *dev_array, int len, double host_array[]){
hipMemcpy(dev_array, host_array, len*sizeof(double), hipMemcpyHostToDevice);
}
void h2d(int *dev_array, int len, int host_array[]){
hipMemcpy(dev_array, host_array, len*sizeof(int), hipMemcpyHostToDevice);
}
/* dev2host overloads: copy `len` elements device -> host into a caller
   provided buffer. NOTE(review): hipMemcpy return codes are not checked. */
void dev2host(hiprandState_t host_array[], int len, hiprandState_t *dev_array){
hipMemcpy(host_array, dev_array, len*sizeof(hiprandState_t),hipMemcpyDeviceToHost);
}
void dev2host(double host_array[], int len, double *dev_array){
hipMemcpy(host_array, dev_array, len*sizeof(double),hipMemcpyDeviceToHost);
}
void dev2host(int host_array[], int len, int *dev_array){
hipMemcpy(host_array, dev_array, len*sizeof(int),hipMemcpyDeviceToHost);
}
void dev2host(unsigned long long host_array[], int len, unsigned long long *dev_array){
hipMemcpy(host_array, dev_array, len*sizeof(unsigned long long),hipMemcpyDeviceToHost);
}
/*---------------------------------------*/
| 2100c6d7ed10d77c843c3d85ba567235437e603a.cu | #include "CUDA.h"
/* ------------------------ */
/* Allocates an uninitialized device array of `len` doubles and returns the
   device pointer (caller frees). NOTE(review): the cudaMalloc status is not
   checked, so a failed allocation returns an unusable pointer. */
double *allocdev_double(int len){
double *dev_array;
cudaMalloc( (void**)&dev_array, len*sizeof(double) );
return dev_array;
}
/* Same as allocdev_double, but allocates `len` ints. */
int *allocdev_int(int len){
int *dev_array;
cudaMalloc( (void**)&dev_array, len*sizeof(int) );
return dev_array;
}
/* host2dev overloads: allocate a device array of `len` elements, copy
   host_array into it, and return the device pointer (caller frees).
   NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked. */
curandState *host2dev(int len, curandState host_array[]){
curandState *dev_array;
cudaMalloc( (void**)&dev_array, len*sizeof(curandState) );
cudaMemcpy(dev_array, host_array, len*sizeof(curandState), cudaMemcpyHostToDevice);
return dev_array;
}
double *host2dev(int len, double host_array[]){
double *dev_array;
cudaMalloc( (void**)&dev_array, len*sizeof(double) );
cudaMemcpy(dev_array, host_array, len*sizeof(double), cudaMemcpyHostToDevice);
return dev_array;
}
int *host2dev(int len, int host_array[]){
int *dev_array;
cudaMalloc( (void**)&dev_array, len*sizeof(int) );
cudaMemcpy(dev_array, host_array, len*sizeof(int), cudaMemcpyHostToDevice);
return dev_array;
}
unsigned long long *host2dev(int len, unsigned long long host_array[]){
unsigned long long *dev_array;
cudaMalloc( (void**)&dev_array, len*sizeof(unsigned long long) );
cudaMemcpy(dev_array, host_array, len*sizeof(unsigned long long), cudaMemcpyHostToDevice);
return dev_array;
}
/* h2d overloads: copy `len` elements host -> device into an already
   allocated device buffer (no allocation happens here). */
void h2d(double *dev_array, int len, double host_array[]){
cudaMemcpy(dev_array, host_array, len*sizeof(double), cudaMemcpyHostToDevice);
}
void h2d(int *dev_array, int len, int host_array[]){
cudaMemcpy(dev_array, host_array, len*sizeof(int), cudaMemcpyHostToDevice);
}
/* dev2host overloads: copy `len` elements device -> host into a caller
   provided buffer. NOTE(review): cudaMemcpy return codes are not checked. */
void dev2host(curandState host_array[], int len, curandState *dev_array){
cudaMemcpy(host_array, dev_array, len*sizeof(curandState),cudaMemcpyDeviceToHost);
}
void dev2host(double host_array[], int len, double *dev_array){
cudaMemcpy(host_array, dev_array, len*sizeof(double),cudaMemcpyDeviceToHost);
}
void dev2host(int host_array[], int len, int *dev_array){
cudaMemcpy(host_array, dev_array, len*sizeof(int),cudaMemcpyDeviceToHost);
}
void dev2host(unsigned long long host_array[], int len, unsigned long long *dev_array){
cudaMemcpy(host_array, dev_array, len*sizeof(unsigned long long),cudaMemcpyDeviceToHost);
}
/*---------------------------------------*/
|
0b6aff77d5432e6d0239f82a6cba7dbd695c130c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernelParms.cuh"
#include "complexGpu.cuh"
#include "utils/index.cuh"
#include "enum-field.h"
#include "scalar/scalarField.h"
#include "utils/parse.h"
using namespace gpuCu;
using namespace indexHelper;
// In-place complex scaling: fD[i] *= factor for every i < V.
// Launched on a 2-D grid of 1-D blocks; the flattened thread id is the index.
template<class Float>
__global__ void scaleKernel (complex<Float> * __restrict__ fD, uint V, Float factor)
{
    const uint tid = threadIdx.x + blockDim.x*(blockIdx.x + gridDim.x*blockIdx.y);
    if (tid < V)
        fD[tid] *= factor;
}
// Multiplies one field of the scalar object by `factor` on the GPU.
// Dispatches on the field's precision (double/float) and on which buffer is
// targeted: m and m2 use eSize() elements, v uses Size(). The kernel runs on
// the scalar's first stream with one grid y-slice per (Lz + 2) depth slab.
// NOTE(review): BLSIZE is #define'd here and never #undef'd, so it leaks into
// the rest of the translation unit.
void scaleGpu (Scalar *sField, FieldIndex fIdx, double factor)
{
const uint Lx = sField->Length();
const uint Lz = sField->Depth();
#define BLSIZE 512
dim3 gridSize((Lx*Lx+BLSIZE-1)/BLSIZE,Lz+2,1);
dim3 blockSize(BLSIZE,1,1);
switch (sField->Precision())
{
case FIELD_DOUBLE:
{
complex<double> *field;
uint V = sField->Size();
switch (fIdx)
{
case FIELD_M:
field = static_cast<complex<double>*> (sField->mGpu());
V = sField->eSize();
break;
case FIELD_V:
field = static_cast<complex<double>*> (sField->vGpu());
break;
case FIELD_M2:
// m2 is unavailable in lowmem configurations.
if (sField->LowMem()) {
printf ("Wrong field. Lowmem forbids the use of m2");
return;
}
field = static_cast<complex<double>*> (sField->m2Gpu());
V = sField->eSize();
break;
case FIELD_MV:
printf ("Not implemented yet. Please call scale with FIELD_M and then with FIELD_V\n");
break;
default:
printf ("Wrong field. Valid possibilities: FIELD_M, FIELD_M2 and FIELD_V");
return;
}
hipLaunchKernelGGL(( scaleKernel<double>), dim3(gridSize), dim3(blockSize), 0, ((hipStream_t *)sField->Streams())[0], field, V, factor);
break;
}
case FIELD_SINGLE:
{
complex<float> *field;
uint V = sField->Size();
switch (fIdx)
{
case FIELD_M:
field = static_cast<complex<float> *> (sField->mGpu());
V = sField->eSize();
break;
case FIELD_V:
field = static_cast<complex<float> *> (sField->vGpu());
break;
case FIELD_M2:
// m2 is unavailable in lowmem configurations.
if (sField->LowMem()) {
printf ("Wrong field. Lowmem forbids the use of m2");
return;
}
field = static_cast<complex<float> *> (sField->m2Gpu());
V = sField->eSize();
break;
case FIELD_MV:
printf ("Not implemented yet. Please call scale with FIELD_M and then with FIELD_V\n");
break;
default:
printf ("Wrong field. Valid possibilities: FIELD_M, FIELD_M2 and FIELD_V");
break;
}
hipLaunchKernelGGL(( scaleKernel<float>), dim3(gridSize), dim3(blockSize), 0, ((hipStream_t *)sField->Streams())[0], field, V, (float) factor);
break;
}
default:
printf("Unrecognized precision\n");
exit(1);
break;
}
}
| 0b6aff77d5432e6d0239f82a6cba7dbd695c130c.cu | #include "kernelParms.cuh"
#include "complexGpu.cuh"
#include "utils/index.cuh"
#include "enum-field.h"
#include "scalar/scalarField.h"
#include "utils/parse.h"
using namespace gpuCu;
using namespace indexHelper;
// In-place complex scaling: fD[i] *= factor for every i < V.
// Launched on a 2-D grid of 1-D blocks; the flattened thread id is the index.
template<class Float>
__global__ void scaleKernel (complex<Float> * __restrict__ fD, uint V, Float factor)
{
    const uint tid = threadIdx.x + blockDim.x*(blockIdx.x + gridDim.x*blockIdx.y);
    if (tid < V)
        fD[tid] *= factor;
}
// Multiplies one field of the scalar object by `factor` on the GPU.
// Dispatches on the field's precision (double/float) and on which buffer is
// targeted: m and m2 use eSize() elements, v uses Size(). The kernel runs on
// the scalar's first stream with one grid y-slice per (Lz + 2) depth slab.
// NOTE(review): BLSIZE is #define'd here and never #undef'd, so it leaks into
// the rest of the translation unit.
void scaleGpu (Scalar *sField, FieldIndex fIdx, double factor)
{
const uint Lx = sField->Length();
const uint Lz = sField->Depth();
#define BLSIZE 512
dim3 gridSize((Lx*Lx+BLSIZE-1)/BLSIZE,Lz+2,1);
dim3 blockSize(BLSIZE,1,1);
switch (sField->Precision())
{
case FIELD_DOUBLE:
{
complex<double> *field;
uint V = sField->Size();
switch (fIdx)
{
case FIELD_M:
field = static_cast<complex<double>*> (sField->mGpu());
V = sField->eSize();
break;
case FIELD_V:
field = static_cast<complex<double>*> (sField->vGpu());
break;
case FIELD_M2:
// m2 is unavailable in lowmem configurations.
if (sField->LowMem()) {
printf ("Wrong field. Lowmem forbids the use of m2");
return;
}
field = static_cast<complex<double>*> (sField->m2Gpu());
V = sField->eSize();
break;
case FIELD_MV:
printf ("Not implemented yet. Please call scale with FIELD_M and then with FIELD_V\n");
break;
default:
printf ("Wrong field. Valid possibilities: FIELD_M, FIELD_M2 and FIELD_V");
return;
}
scaleKernel<double><<<gridSize, blockSize, 0, ((cudaStream_t *)sField->Streams())[0]>>> (field, V, factor);
break;
}
case FIELD_SINGLE:
{
complex<float> *field;
uint V = sField->Size();
switch (fIdx)
{
case FIELD_M:
field = static_cast<complex<float> *> (sField->mGpu());
V = sField->eSize();
break;
case FIELD_V:
field = static_cast<complex<float> *> (sField->vGpu());
break;
case FIELD_M2:
// m2 is unavailable in lowmem configurations.
if (sField->LowMem()) {
printf ("Wrong field. Lowmem forbids the use of m2");
return;
}
field = static_cast<complex<float> *> (sField->m2Gpu());
V = sField->eSize();
break;
case FIELD_MV:
printf ("Not implemented yet. Please call scale with FIELD_M and then with FIELD_V\n");
break;
default:
printf ("Wrong field. Valid possibilities: FIELD_M, FIELD_M2 and FIELD_V");
break;
}
scaleKernel<float><<<gridSize, blockSize, 0, ((cudaStream_t *)sField->Streams())[0]>>> (field, V, (float) factor);
break;
}
default:
printf("Unrecognized precision\n");
exit(1);
break;
}
}
|
18e3ccfc0442c81a83091985126ebdc4a4651fef.hip | // !!! This is a file automatically generated by hipify!!!
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC3D_KERNEL_H_
#define _CUBIC3D_KERNEL_H_
#include "internal/bspline_kernel.cu"
//! Trilinearly interpolated texture lookup, using unnormalized coordinates.
//! This function merely serves as a reference for the tricubic versions.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum hipTextureReadMode mode>
__device__ float linearTex3D( texture<T, 3, mode> tex, float3 coord )
{
    // Single hardware trilinear fetch; baseline against which the tricubic
    // variants below can be compared.
    return tex3D( tex, coord.x, coord.y, coord.z );
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Straight forward implementation, using 64 nearest neighbour lookups.
//! @param tex  3D texture
//! @param coord  unnormalized 3D texture coordinate
template<class T, enum hipTextureReadMode mode>
__device__ float cubicTex3DSimple( texture<T, 3, mode> tex, float3 coord )
{
    // transform the coordinate from [0,extent] to [-0.5, extent-0.5]
    const float3 coord_grid = coord - 0.5f;
    float3 index = floor(coord_grid);
    const float3 fraction = coord_grid - index;
    index = index + 0.5f;  //move from [-0.5, extent-0.5] to [0, extent]
    float result = 0.0f;
    // Accumulate the separably-weighted 4x4x4 neighbourhood around the
    // sample point. The loop counters are floats so they can be used
    // directly in the weight/coordinate arithmetic; the "< 2.5f" bound
    // covers the integer offsets -1, 0, 1, 2.
    for ( float z = -1; z < 2.5f; z++ )  //range [-1, 2]
    {
        float bsplineZ = bspline( z - fraction.z );
        float w = index.z + z;
        for ( float y = -1; y < 2.5f; y++ )
        {
            float bsplineYZ = bspline( y - fraction.y ) * bsplineZ;
            float v = index.y + y;
            for ( float x = -1; x < 2.5f; x++ )
            {
                // Product of the per-axis B-spline weights, times the
                // nearest-neighbour texture sample for this offset.
                float bsplineXYZ = bspline( x - fraction.x ) * bsplineYZ;
                float u = index.x + x;
                result += bsplineXYZ * tex3D( tex, u, v, w );
            }
        }
    }
    return result;
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Fast implementation, using 8 trilinear lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
#define WEIGHTS bspline_weights
#define CUBICTEX3D cubicTex3D
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
// Fast tricubic interpolated 1st order derivative texture lookup in x-, y-
// and z-direction, using unnormalized coordinates.
// Per-axis cubic B-spline weight computation for a tricubic lookup that
// returns the 1st-order derivative along x: the x axis receives the
// derivative weights, while y and z keep the ordinary interpolation weights.
__device__ void bspline_weights_1st_derivative_x( float3 fraction,
    float3& w0, float3& w1, float3& w2, float3& w3 )
{
    float t0, t1, t2, t3;
    bspline_weights_1st_derivative( fraction.x, t0, t1, t2, t3 );
    w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
    bspline_weights( fraction.y, t0, t1, t2, t3 );
    w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
    bspline_weights( fraction.z, t0, t1, t2, t3 );
    w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
// Same scheme as above, with the derivative weights applied along y only.
__device__ void bspline_weights_1st_derivative_y( float3 fraction,
    float3& w0, float3& w1, float3& w2, float3& w3 )
{
    float t0, t1, t2, t3;
    bspline_weights( fraction.x, t0, t1, t2, t3 );
    w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
    bspline_weights_1st_derivative( fraction.y, t0, t1, t2, t3 );
    w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
    bspline_weights( fraction.z, t0, t1, t2, t3 );
    w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
// Same scheme as above, with the derivative weights applied along z only.
__device__ void bspline_weights_1st_derivative_z( float3 fraction,
    float3& w0, float3& w1, float3& w2, float3& w3 )
{
    float t0, t1, t2, t3;
    bspline_weights( fraction.x, t0, t1, t2, t3 );
    w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
    bspline_weights( fraction.y, t0, t1, t2, t3 );
    w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
    bspline_weights_1st_derivative( fraction.z, t0, t1, t2, t3 );
    w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
#define WEIGHTS bspline_weights_1st_derivative_x
#define CUBICTEX3D cubicTex3D_1st_derivative_x
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_y
#define CUBICTEX3D cubicTex3D_1st_derivative_y
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_z
#define CUBICTEX3D cubicTex3D_1st_derivative_z
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#endif // _CUBIC3D_KERNEL_H_
| 18e3ccfc0442c81a83091985126ebdc4a4651fef.cu | /*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC3D_KERNEL_H_
#define _CUBIC3D_KERNEL_H_
#include "internal/bspline_kernel.cu"
//! Trilinearly interpolated texture lookup, using unnormalized coordinates.
//! This function merely serves as a reference for the tricubic versions.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum cudaTextureReadMode mode>
__device__ float linearTex3D( texture<T, 3, mode> tex, float3 coord )
{
    // Single hardware trilinear fetch; baseline against which the tricubic
    // variants below can be compared.
    return tex3D( tex, coord.x, coord.y, coord.z );
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Straight forward implementation, using 64 nearest neighbour lookups.
//! @param tex  3D texture
//! @param coord  unnormalized 3D texture coordinate
template<class T, enum cudaTextureReadMode mode>
__device__ float cubicTex3DSimple( texture<T, 3, mode> tex, float3 coord )
{
    // transform the coordinate from [0,extent] to [-0.5, extent-0.5]
    const float3 coord_grid = coord - 0.5f;
    float3 index = floor(coord_grid);
    const float3 fraction = coord_grid - index;
    index = index + 0.5f;  //move from [-0.5, extent-0.5] to [0, extent]
    float result = 0.0f;
    // Accumulate the separably-weighted 4x4x4 neighbourhood around the
    // sample point. The loop counters are floats so they can be used
    // directly in the weight/coordinate arithmetic; the "< 2.5f" bound
    // covers the integer offsets -1, 0, 1, 2.
    for ( float z = -1; z < 2.5f; z++ )  //range [-1, 2]
    {
        float bsplineZ = bspline( z - fraction.z );
        float w = index.z + z;
        for ( float y = -1; y < 2.5f; y++ )
        {
            float bsplineYZ = bspline( y - fraction.y ) * bsplineZ;
            float v = index.y + y;
            for ( float x = -1; x < 2.5f; x++ )
            {
                // Product of the per-axis B-spline weights, times the
                // nearest-neighbour texture sample for this offset.
                float bsplineXYZ = bspline( x - fraction.x ) * bsplineYZ;
                float u = index.x + x;
                result += bsplineXYZ * tex3D( tex, u, v, w );
            }
        }
    }
    return result;
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Fast implementation, using 8 trilinear lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
#define WEIGHTS bspline_weights
#define CUBICTEX3D cubicTex3D
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
// Fast tricubic interpolated 1st order derivative texture lookup in x-, y-
// and z-direction, using unnormalized coordinates.
// Per-axis cubic B-spline weight computation for a tricubic lookup that
// returns the 1st-order derivative along x: the x axis receives the
// derivative weights, while y and z keep the ordinary interpolation weights.
__device__ void bspline_weights_1st_derivative_x( float3 fraction,
    float3& w0, float3& w1, float3& w2, float3& w3 )
{
    float t0, t1, t2, t3;
    bspline_weights_1st_derivative( fraction.x, t0, t1, t2, t3 );
    w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
    bspline_weights( fraction.y, t0, t1, t2, t3 );
    w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
    bspline_weights( fraction.z, t0, t1, t2, t3 );
    w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
// Same scheme as above, with the derivative weights applied along y only.
__device__ void bspline_weights_1st_derivative_y( float3 fraction,
    float3& w0, float3& w1, float3& w2, float3& w3 )
{
    float t0, t1, t2, t3;
    bspline_weights( fraction.x, t0, t1, t2, t3 );
    w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
    bspline_weights_1st_derivative( fraction.y, t0, t1, t2, t3 );
    w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
    bspline_weights( fraction.z, t0, t1, t2, t3 );
    w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
// Same scheme as above, with the derivative weights applied along z only.
__device__ void bspline_weights_1st_derivative_z( float3 fraction,
    float3& w0, float3& w1, float3& w2, float3& w3 )
{
    float t0, t1, t2, t3;
    bspline_weights( fraction.x, t0, t1, t2, t3 );
    w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
    bspline_weights( fraction.y, t0, t1, t2, t3 );
    w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
    bspline_weights_1st_derivative( fraction.z, t0, t1, t2, t3 );
    w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
#define WEIGHTS bspline_weights_1st_derivative_x
#define CUBICTEX3D cubicTex3D_1st_derivative_x
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_y
#define CUBICTEX3D cubicTex3D_1st_derivative_y
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_z
#define CUBICTEX3D cubicTex3D_1st_derivative_z
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#endif // _CUBIC3D_KERNEL_H_
|
e43bde7705ee007526c9bb43279704b71ba865ca.hip | // !!! This is a file automatically generated by hipify!!!
/*
Author Javier Rodrguez
A01152572
*/
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define N (1000000)
#define THREADS_PER_BLOCK 1000
//pi on cpu
double getPiCpu(){
    // Midpoint-rule approximation of the integral of 4/(1+x^2) over [0,1],
    // whose exact value is pi, using N equal-width rectangles.
    const long num_rects = N;
    const double width = 1.0 / (double) num_rects;
    double sum = 0.0;
    for (long i = 0; i < num_rects; i++) {
        const double mid = (i + 0.5) * width;
        sum += 4.0 / (1.0 + mid * mid);
    }
    // Total area = common rectangle width times the accumulated heights.
    return width * sum;
}
//Pi gpu
__global__ void getPiGpu(double *a, long n) {
    // One thread evaluates one midpoint-rule rectangle height of
    // 4/(1+x^2); the host sums the heights and scales by width afterwards.
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const double width = 1.0 / (long) n;
    if (tid >= n) {
        return;  // guard the tail when the grid overshoots n
    }
    const double mid = (tid + 0.5) * width;
    a[tid] = 4.0 / (1.0 + mid * mid);
}
// double piSum(double *a){
// double sum, width, pi;
// long num_rects = N;
//
// width = 1.0 / (double) num_rects;
//
// for (long i = 0; i < N; i++) {
// sum += a[i];
// }
// pi = width * sum;
// return pi;
// }
// Approximates pi on both GPU and CPU with the midpoint rule and prints
// each result together with its wall-clock time.
int main() {
    double piCpu, piGpu;
    // FIX: sum was read before ever being written (undefined behavior).
    double sum = 0.0, width;
    // FIX: N doubles (~8 MB) as an automatic array can overflow the stack;
    // static storage keeps the same usage without that risk.
    static double a[N];
    double *d_a;
    size_t size = N * sizeof(double);
    // FIX: the original malloc'd a host buffer into d_a and immediately
    // overwrote the pointer with hipMalloc, leaking the host allocation.
    hipMalloc((void**)&d_a, size);
    //time on gpu
    clock_t timeOnGpu = clock();
    //kernel call: one thread per rectangle, N/THREADS_PER_BLOCK blocks
    hipLaunchKernelGGL(( getPiGpu), dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, N);
    // copy the per-rectangle heights back to the host (this also
    // synchronizes with the kernel)
    hipMemcpy(a, d_a, size, hipMemcpyDeviceToHost);
    hipFree(d_a);
    // Host-side reduction: pi ~= width * sum(heights).
    width = 1.0 / (double) N;
    for (long i = 0; i < N; i++) {
        sum += a[i];
    }
    piGpu = width * sum;
    printf("%f\n", piGpu);
    printf("time on GPU %f \n", ((double)clock() - timeOnGpu)/CLOCKS_PER_SEC);
    //Get pi cpu and print
    clock_t timeOnCpu = clock();
    piCpu = getPiCpu();
    printf("%lf\n", piCpu);
    printf("time on CPU %f \n", ((double)clock() - timeOnCpu)/CLOCKS_PER_SEC);
    return 0;
}
| e43bde7705ee007526c9bb43279704b71ba865ca.cu | /*
Author Javier Rodríguez
A01152572
*/
#include "cuda_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define N (1000000)
#define THREADS_PER_BLOCK 1000
//pi on cpu
double getPiCpu(){
    // Midpoint-rule approximation of the integral of 4/(1+x^2) over [0,1],
    // whose exact value is pi, using N equal-width rectangles.
    const long num_rects = N;
    const double width = 1.0 / (double) num_rects;
    double sum = 0.0;
    for (long i = 0; i < num_rects; i++) {
        const double mid = (i + 0.5) * width;
        sum += 4.0 / (1.0 + mid * mid);
    }
    // Total area = common rectangle width times the accumulated heights.
    return width * sum;
}
//Pi gpu
__global__ void getPiGpu(double *a, long n) {
    // One thread evaluates one midpoint-rule rectangle height of
    // 4/(1+x^2); the host sums the heights and scales by width afterwards.
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const double width = 1.0 / (long) n;
    if (tid >= n) {
        return;  // guard the tail when the grid overshoots n
    }
    const double mid = (tid + 0.5) * width;
    a[tid] = 4.0 / (1.0 + mid * mid);
}
// double piSum(double *a){
// double sum, width, pi;
// long num_rects = N;
//
// width = 1.0 / (double) num_rects;
//
// for (long i = 0; i < N; i++) {
// sum += a[i];
// }
// pi = width * sum;
// return pi;
// }
// Approximates pi on both GPU and CPU with the midpoint rule and prints
// each result together with its wall-clock time.
int main() {
    double piCpu, piGpu;
    // FIX: sum was read before ever being written (undefined behavior).
    double sum = 0.0, width;
    // FIX: N doubles (~8 MB) as an automatic array can overflow the stack;
    // static storage keeps the same usage without that risk.
    static double a[N];
    double *d_a;
    size_t size = N * sizeof(double);
    // FIX: the original malloc'd a host buffer into d_a and immediately
    // overwrote the pointer with cudaMalloc, leaking the host allocation.
    cudaMalloc((void**)&d_a, size);
    //time on gpu
    clock_t timeOnGpu = clock();
    //kernel call: one thread per rectangle, N/THREADS_PER_BLOCK blocks
    getPiGpu<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, N);
    // copy the per-rectangle heights back to the host (this also
    // synchronizes with the kernel)
    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    // Host-side reduction: pi ~= width * sum(heights).
    width = 1.0 / (double) N;
    for (long i = 0; i < N; i++) {
        sum += a[i];
    }
    piGpu = width * sum;
    printf("%f\n", piGpu);
    printf("time on GPU %f \n", ((double)clock() - timeOnGpu)/CLOCKS_PER_SEC);
    //Get pi cpu and print
    clock_t timeOnCpu = clock();
    piCpu = getPiCpu();
    printf("%lf\n", piCpu);
    printf("time on CPU %f \n", ((double)clock() - timeOnCpu)/CLOCKS_PER_SEC);
    return 0;
}
|
8966a1a87bc0f252254a178eb29a4b8f1e6ad8b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <time.h>
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>
//Prototypes
void GPU_fill_rand(float *A, int nrRowsA, int nrColsA);
void CPU_fill_matrices(float* A, int nrRowsA, int nrColsA);
void gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n);
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A);
void fprint_MemCpy_Times(int matrixSize, int iterationnr, int msec, char *currentMatrix, char *fileName);
void fprint_sgemm_time(int matrixSize, int iterationnr, int msec, char *fileName);
void output_matrix(const float *A, int nr_rows_A, int nr_cols_A, char *fileName);
// Sweeps square matrix sizes and, for each size, times host<->device copies
// and hipBLAS sgemm over a number of iterations, appending the timings to
// text files.
int main() {
    printf("Initializing...\n");
    int nrRowsA, nrColsA, nrRowsB, nrColsB, nrRowsC, nrColsC;
    // Benchmark sweep: square matrices from matrixStartSize up to
    // matrixMaxSize in steps of matrixIncrease, with sgemmIterations timed
    // multiplications per size.
    // FIX: the declaration list originally ended with "sgemmIterations = 50,"
    // followed by a comment and a fresh "int" declaration, which does not
    // compile; the list is now terminated with ';'.
    int matrixStartSize = 100,
        matrixMaxSize = 3500,
        matrixIncrease = 100,
        sgemmIterations = 50;
    /*sgemmIterationsMinimum = 0,
    sgemmIterationsDecrease = 0;*/
    int matrixActualSize = matrixStartSize;
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    srand(time(NULL));
    hipError_t error;
    printf("Copying from matrix size %d to %d.\n", matrixStartSize, matrixMaxSize);
    printf("Increasing size with %d for each iteration.\n\n", matrixIncrease);
    // Calculations
    printf("Initializing complete. Starting calculations...\n");
    while (matrixActualSize <= matrixMaxSize){
        printf("Calculating with size %d: ", matrixActualSize);
        // Square Arrays
        nrRowsA = nrColsA = nrRowsB = nrColsB = nrRowsC = nrColsC = matrixActualSize;
        for (int k = 0; k < sgemmIterations; k++){
            // Progress indicator every 5 iterations.
            if (k % 5 == 0)
            {
                printf("%d ", k);
            }
            // Allocate memory on Host
            h_A = (float*)malloc(nrRowsA * nrColsA * sizeof(float));
            if (h_A == NULL) {
                printf("CPU: h_A was not allocated: %d", k);
                return EXIT_FAILURE;
            }
            h_B = (float*)malloc(nrRowsB * nrColsB * sizeof(float));
            if (h_B == NULL) {
                printf("CPU: h_B was not allocated: %d", k);
                return EXIT_FAILURE;
            }
            h_C = (float*)malloc(nrRowsC * nrColsC * sizeof(float));
            if (h_C == NULL) {
                printf("CPU: h_C was not allocated: %d", k);
                return EXIT_FAILURE;
            }
            // Allocate memory on Device
            error = hipMalloc(&d_A, nrRowsA * nrColsA * sizeof(float));
            if (error != hipSuccess) {
                printf("Memory was not allocated for matrix A: %d", k);
                return EXIT_FAILURE;
            }
            error = hipMalloc(&d_B, nrRowsB * nrColsB * sizeof(float));
            if (error != hipSuccess) {
                printf("Memory was not allocated for matrix B: %d", k);
                return EXIT_FAILURE;
            }
            error = hipMalloc(&d_C, nrRowsC * nrColsC * sizeof(float));
            if (error != hipSuccess) {
                printf("Memory was not allocated for matrix C: %d", k);
                return EXIT_FAILURE;
            }
            // Fill the arrays A and B on the host with random numbers.
            //GPU_fill_rand(d_A, nrRowsA, nrColsA);
            //GPU_fill_rand(d_B, nrRowsB, nrColsB);
            CPU_fill_matrices(h_A, nrRowsA, nrColsA);
            // FIX: the row count was passed as nrColsB; harmless for the
            // square matrices used here, but wrong in general.
            CPU_fill_matrices(h_B, nrRowsB, nrColsB);
            //Copy h_A and h_B to the device
            clock_t startHtoDA = clock(), diffHtoDA;
            error = hipMemcpy(d_A, h_A, nrRowsA * nrColsA * sizeof(float), hipMemcpyHostToDevice);
            if (error != hipSuccess){
                printf("Copying matrice h_A HtoD failed.\n: %d", k);
                return EXIT_FAILURE;
            }
            diffHtoDA = clock() - startHtoDA;
            int msecHtoDA = diffHtoDA * 1000 / CLOCKS_PER_SEC;
            fprint_MemCpy_Times(matrixActualSize, k, msecHtoDA, "MemCpy:A", "./MemCpyHtoDTimes.txt");
            clock_t startHtoDB = clock(), diffHtoDB;
            error = hipMemcpy(d_B, h_B, nrRowsB * nrColsB * sizeof(float), hipMemcpyHostToDevice);
            if (error != hipSuccess){
                printf("Copying matrice h_B HtoD failed.\n: %d", k);
                return EXIT_FAILURE;
            }
            diffHtoDB = clock() - startHtoDB;
            int msecHtoDB = diffHtoDB * 1000 / CLOCKS_PER_SEC;
            fprint_MemCpy_Times(matrixActualSize, k, msecHtoDB, "MemCpy:B", "./MemCpyHtoDTimes.txt");
            //Perform Sgemm on the device
            // NOTE(review): clock() measures host time and the sgemm call is
            // asynchronous with respect to the host, so this interval captures
            // little of the actual GPU work; hipEvent timing would be more
            // accurate — confirm before relying on these numbers.
            clock_t startSgemm = clock(), diffSgemm;
            gpu_blas_mmul(d_A, d_B, d_C, nrRowsA, nrColsA, nrColsB);
            diffSgemm = clock() - startSgemm;
            int msecSgemm = diffSgemm * 1000 / CLOCKS_PER_SEC;
            fprint_sgemm_time(matrixActualSize, k, msecSgemm, "./SgemmGPUtimes.txt");
            //Copy result back to the host
            clock_t startDtoH = clock(), diffDtoH;
            error = hipMemcpy(h_C, d_C, nrRowsC * nrColsC * sizeof(float), hipMemcpyDeviceToHost);
            if (error != hipSuccess){
                printf("Copying matrix d_C DtoH failed iteration %d", k);
                return EXIT_FAILURE;
            }
            diffDtoH = clock() - startDtoH;
            int msecDtoH = diffDtoH * 1000 / CLOCKS_PER_SEC;
            fprint_MemCpy_Times(matrixActualSize, k, msecDtoH, "MemCpy:d_C", "./MemCpyResulttoH.txt");
            //Free GPU memory
            hipFree(d_A);
            hipFree(d_B);
            hipFree(d_C);
            hipDeviceReset();
            //Free CPU memory
            free(h_A);
            free(h_B);
            free(h_C);
        }
        printf("- Size %d done!\n", matrixActualSize);
        /*if (sgemmIterations > sgemmIterationsMinimum) {
            sgemmIterations -= sgemmIterationsDecrease;
        }*/
        matrixActualSize += matrixIncrease;
    }
    printf("Done John\n");
    printf("Press any key to exit...");
    getchar();
    return 0;
}
//Random fill matrices on device
//Random fill matrices on device
// Fills the device buffer A (nrRowsA * nrColsA floats) with uniformly
// distributed random values using hipRAND's default pseudo-random
// generator, seeded from the host clock.
void GPU_fill_rand(float *A, int nrRowsA, int nrColsA) {
    hiprandGenerator_t prng;
    hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
    hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
    hiprandGenerateUniform(prng, A, nrRowsA * nrColsA);
    // FIX: the generator was never destroyed, leaking hipRAND state on
    // every call.
    hiprandDestroyGenerator(prng);
}
//Random fill matrices on host
//Random fill matrices on host
// Fills the nrRowsA x nrColsA matrix A (contiguous row-major storage) with
// random integer values in [0, 20), stored as floats.
void CPU_fill_matrices(float* A, int nrRowsA, int nrColsA) {
    for (int r = 0; r < nrRowsA; r++) {
        for (int c = 0; c < nrColsA; c++){
            // FIX: the row offset must use the row length (nrColsA), not
            // the row count; the original "r * nrRowsA + c" only covered
            // every element for square matrices and left elements
            // unwritten (or overlapped rows) otherwise.
            A[r * nrColsA + c] = static_cast<float>(rand() % 20);
        }
    }
}
//Function that multiplies matrices on the device
//Function that multiplies matrices on the device
// Computes C = A * B with hipBLAS sgemm, where A is m x k, B is k x n and
// C is m x n, all stored column-major on the device (alpha = 1, beta = 0,
// no transposition).
// NOTE(review): creating and destroying the hipBLAS handle on every call is
// expensive; consider creating it once and reusing it across the benchmark.
void gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n) {
    // Leading dimensions for column-major storage.
    int lda = m, ldb = k, ldc = m;
    const float alf = 1;
    const float bet = 0;
    const float *alpha = &alf;
    const float *beta = &bet;
    // Create a handle for CUBLAS
    hipblasHandle_t handle;
    hipblasCreate(&handle);
    // Do the actual multiplication
    if (hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc) != HIPBLAS_STATUS_SUCCESS){
        printf("hipblasSgemm failed");
    }
    // Destroy the handle
    hipblasDestroy(handle);
}
//Print a given host matrix
//Print a given host matrix
// Writes the nr_rows_A x nr_cols_A matrix A (column-major storage) to
// stdout, one matrix row per output line, followed by a blank line.
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) {
    for (int row = 0; row < nr_rows_A; ++row){
        for (int col = 0; col < nr_cols_A; ++col){
            printf("%f ", A[col * nr_rows_A + row]);
        }
        printf("\n");
    }
    printf("\n");
}
// Print times to a .txt
// Print times to a .txt
// Appends one "iteration-<k>-matrixSize-<s>-matrixName-<name>-time-<sec>.<ms>"
// record to fileName. Exits the process if the file cannot be opened.
void fprint_MemCpy_Times(int matrixSize, int iterationnr, int msec, char *currentMatrix, char *fileName) {
    FILE *f = fopen(fileName, "a");
    if (f == NULL) {
        // FIX: report the file that actually failed instead of the
        // hard-coded "GPUMemCopyTimes.txt".
        printf("an error occured when opening %s\n", fileName);
        printf("Press any key to exit...");
        getchar();
        exit(1);
    }
    fprintf(f, "iteration-%d-", iterationnr);
    fprintf(f, "matrixSize-%d-", matrixSize);
    // FIX: "%d%d" concatenated whole seconds and UNPADDED milliseconds,
    // so e.g. 1005 ms was logged as "15"; use zero-padded "%d.%03d".
    fprintf(f, "matrixName-%s-time-%d.%03d\n", currentMatrix, msec / 1000, msec % 1000);
    fclose(f);
}
// Print times to a .txt
// Print times to a .txt
// Appends one "iteration-<k>-matrixSize-<s>-time-<sec>.<ms>" record to
// fileName. Exits the process if the file cannot be opened.
void fprint_sgemm_time(int matrixSize, int iterationnr, int msec, char *fileName) {
    FILE *f = fopen(fileName, "a");
    if (f == NULL) {
        // FIX: report the file that actually failed instead of the
        // hard-coded "GPUMemCopyTimes.txt".
        printf("an error occured when opening %s\n", fileName);
        printf("Press any key to exit...");
        getchar();
        exit(1);
    }
    fprintf(f, "iteration-%d-", iterationnr);
    fprintf(f, "matrixSize-%d-", matrixSize);
    // FIX: "%d%d" concatenated whole seconds and UNPADDED milliseconds,
    // so e.g. 1005 ms was logged as "15"; use zero-padded "%d.%03d".
    fprintf(f, "time-%d.%03d\n", msec / 1000, msec % 1000);
    fclose(f);
}
// Print a given matrix's entries to a file
// Print a given matrix's entries to a file
// Appends the nr_rows_A x nr_cols_A matrix A (column-major storage) to
// fileName as comma-separated rows, terminated by two newlines. Exits the
// process if the file cannot be opened.
void output_matrix(const float *A, int nr_rows_A, int nr_cols_A, char *fileName) {
    FILE *f = fopen(fileName, "a");
    if (f == NULL) {
        printf("an error occured when opening a file\n");
        printf("Press any key to exit...");
        getchar();
        exit(1);
    }
    for (int row = 0; row < nr_rows_A; ++row){
        for (int col = 0; col < nr_cols_A; ++col){
            fprintf(f, "%f, ", A[col * nr_rows_A + row]);
        }
        fprintf(f, "\n");
    }
    fprintf(f, "\n\n");
    fclose(f);
}
| 8966a1a87bc0f252254a178eb29a4b8f1e6ad8b7.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <time.h>
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>
//Prototypes
void GPU_fill_rand(float *A, int nrRowsA, int nrColsA);
void CPU_fill_matrices(float* A, int nrRowsA, int nrColsA);
void gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n);
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A);
void fprint_MemCpy_Times(int matrixSize, int iterationnr, int msec, char *currentMatrix, char *fileName);
void fprint_sgemm_time(int matrixSize, int iterationnr, int msec, char *fileName);
void output_matrix(const float *A, int nr_rows_A, int nr_cols_A, char *fileName);
// Sweeps square matrix sizes and, for each size, times host<->device copies
// and cuBLAS sgemm over a number of iterations, appending the timings to
// text files.
int main() {
    printf("Initializing...\n");
    int nrRowsA, nrColsA, nrRowsB, nrColsB, nrRowsC, nrColsC;
    // Benchmark sweep: square matrices from matrixStartSize up to
    // matrixMaxSize in steps of matrixIncrease, with sgemmIterations timed
    // multiplications per size.
    // FIX: the declaration list originally ended with "sgemmIterations = 50,"
    // followed by a comment and a fresh "int" declaration, which does not
    // compile; the list is now terminated with ';'.
    int matrixStartSize = 100,
        matrixMaxSize = 3500,
        matrixIncrease = 100,
        sgemmIterations = 50;
    /*sgemmIterationsMinimum = 0,
    sgemmIterationsDecrease = 0;*/
    int matrixActualSize = matrixStartSize;
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    srand(time(NULL));
    cudaError_t error;
    printf("Copying from matrix size %d to %d.\n", matrixStartSize, matrixMaxSize);
    printf("Increasing size with %d for each iteration.\n\n", matrixIncrease);
    // Calculations
    printf("Initializing complete. Starting calculations...\n");
    while (matrixActualSize <= matrixMaxSize){
        printf("Calculating with size %d: ", matrixActualSize);
        // Square Arrays
        nrRowsA = nrColsA = nrRowsB = nrColsB = nrRowsC = nrColsC = matrixActualSize;
        for (int k = 0; k < sgemmIterations; k++){
            // Progress indicator every 5 iterations.
            if (k % 5 == 0)
            {
                printf("%d ", k);
            }
            // Allocate memory on Host
            h_A = (float*)malloc(nrRowsA * nrColsA * sizeof(float));
            if (h_A == NULL) {
                printf("CPU: h_A was not allocated: %d", k);
                return EXIT_FAILURE;
            }
            h_B = (float*)malloc(nrRowsB * nrColsB * sizeof(float));
            if (h_B == NULL) {
                printf("CPU: h_B was not allocated: %d", k);
                return EXIT_FAILURE;
            }
            h_C = (float*)malloc(nrRowsC * nrColsC * sizeof(float));
            if (h_C == NULL) {
                printf("CPU: h_C was not allocated: %d", k);
                return EXIT_FAILURE;
            }
            // Allocate memory on Device
            error = cudaMalloc(&d_A, nrRowsA * nrColsA * sizeof(float));
            if (error != cudaSuccess) {
                printf("Memory was not allocated for matrix A: %d", k);
                return EXIT_FAILURE;
            }
            error = cudaMalloc(&d_B, nrRowsB * nrColsB * sizeof(float));
            if (error != cudaSuccess) {
                printf("Memory was not allocated for matrix B: %d", k);
                return EXIT_FAILURE;
            }
            error = cudaMalloc(&d_C, nrRowsC * nrColsC * sizeof(float));
            if (error != cudaSuccess) {
                printf("Memory was not allocated for matrix C: %d", k);
                return EXIT_FAILURE;
            }
            // Fill the arrays A and B on the host with random numbers.
            //GPU_fill_rand(d_A, nrRowsA, nrColsA);
            //GPU_fill_rand(d_B, nrRowsB, nrColsB);
            CPU_fill_matrices(h_A, nrRowsA, nrColsA);
            // FIX: the row count was passed as nrColsB; harmless for the
            // square matrices used here, but wrong in general.
            CPU_fill_matrices(h_B, nrRowsB, nrColsB);
            //Copy h_A and h_B to the device
            clock_t startHtoDA = clock(), diffHtoDA;
            error = cudaMemcpy(d_A, h_A, nrRowsA * nrColsA * sizeof(float), cudaMemcpyHostToDevice);
            if (error != cudaSuccess){
                printf("Copying matrice h_A HtoD failed.\n: %d", k);
                return EXIT_FAILURE;
            }
            diffHtoDA = clock() - startHtoDA;
            int msecHtoDA = diffHtoDA * 1000 / CLOCKS_PER_SEC;
            fprint_MemCpy_Times(matrixActualSize, k, msecHtoDA, "MemCpy:A", "./MemCpyHtoDTimes.txt");
            clock_t startHtoDB = clock(), diffHtoDB;
            error = cudaMemcpy(d_B, h_B, nrRowsB * nrColsB * sizeof(float), cudaMemcpyHostToDevice);
            if (error != cudaSuccess){
                printf("Copying matrice h_B HtoD failed.\n: %d", k);
                return EXIT_FAILURE;
            }
            diffHtoDB = clock() - startHtoDB;
            int msecHtoDB = diffHtoDB * 1000 / CLOCKS_PER_SEC;
            fprint_MemCpy_Times(matrixActualSize, k, msecHtoDB, "MemCpy:B", "./MemCpyHtoDTimes.txt");
            //Perform Sgemm on the device
            // NOTE(review): clock() measures host time and the sgemm call is
            // asynchronous with respect to the host, so this interval captures
            // little of the actual GPU work; cudaEvent timing would be more
            // accurate — confirm before relying on these numbers.
            clock_t startSgemm = clock(), diffSgemm;
            gpu_blas_mmul(d_A, d_B, d_C, nrRowsA, nrColsA, nrColsB);
            diffSgemm = clock() - startSgemm;
            int msecSgemm = diffSgemm * 1000 / CLOCKS_PER_SEC;
            fprint_sgemm_time(matrixActualSize, k, msecSgemm, "./SgemmGPUtimes.txt");
            //Copy result back to the host
            clock_t startDtoH = clock(), diffDtoH;
            error = cudaMemcpy(h_C, d_C, nrRowsC * nrColsC * sizeof(float), cudaMemcpyDeviceToHost);
            if (error != cudaSuccess){
                printf("Copying matrix d_C DtoH failed iteration %d", k);
                return EXIT_FAILURE;
            }
            diffDtoH = clock() - startDtoH;
            int msecDtoH = diffDtoH * 1000 / CLOCKS_PER_SEC;
            fprint_MemCpy_Times(matrixActualSize, k, msecDtoH, "MemCpy:d_C", "./MemCpyResulttoH.txt");
            //Free GPU memory
            cudaFree(d_A);
            cudaFree(d_B);
            cudaFree(d_C);
            cudaDeviceReset();
            //Free CPU memory
            free(h_A);
            free(h_B);
            free(h_C);
        }
        printf("- Size %d done!\n", matrixActualSize);
        /*if (sgemmIterations > sgemmIterationsMinimum) {
            sgemmIterations -= sgemmIterationsDecrease;
        }*/
        matrixActualSize += matrixIncrease;
    }
    printf("Done John\n");
    printf("Press any key to exit...");
    getchar();
    return 0;
}
//Random fill matrices on device
//Random fill matrices on device
// Fills the device buffer A (nrRowsA * nrColsA floats) with uniformly
// distributed random values using cuRAND's default pseudo-random
// generator, seeded from the host clock.
void GPU_fill_rand(float *A, int nrRowsA, int nrColsA) {
    curandGenerator_t prng;
    curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
    curandGenerateUniform(prng, A, nrRowsA * nrColsA);
    // FIX: the generator was never destroyed, leaking cuRAND state on
    // every call.
    curandDestroyGenerator(prng);
}
//Random fill matrices on host
//Random fill matrices on host
// Fills the nrRowsA x nrColsA matrix A (contiguous row-major storage) with
// random integer values in [0, 20), stored as floats.
void CPU_fill_matrices(float* A, int nrRowsA, int nrColsA) {
    for (int r = 0; r < nrRowsA; r++) {
        for (int c = 0; c < nrColsA; c++){
            // FIX: the row offset must use the row length (nrColsA), not
            // the row count; the original "r * nrRowsA + c" only covered
            // every element for square matrices and left elements
            // unwritten (or overlapped rows) otherwise.
            A[r * nrColsA + c] = static_cast<float>(rand() % 20);
        }
    }
}
//Function that multiplies matrices on the device
//Function that multiplies matrices on the device
// Computes C = A * B with cuBLAS sgemm, where A is m x k, B is k x n and
// C is m x n, all stored column-major on the device (alpha = 1, beta = 0,
// no transposition).
// NOTE(review): creating and destroying the cuBLAS handle on every call is
// expensive; consider creating it once and reusing it across the benchmark.
void gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n) {
    // Leading dimensions for column-major storage.
    int lda = m, ldb = k, ldc = m;
    const float alf = 1;
    const float bet = 0;
    const float *alpha = &alf;
    const float *beta = &bet;
    // Create a handle for CUBLAS
    cublasHandle_t handle;
    cublasCreate(&handle);
    // Do the actual multiplication
    if (cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc) != CUBLAS_STATUS_SUCCESS){
        printf("cublasSgemm failed");
    }
    // Destroy the handle
    cublasDestroy(handle);
}
//Print a given host matrix
//Print a given host matrix
// Writes the nr_rows_A x nr_cols_A matrix A (column-major storage) to
// stdout, one matrix row per output line, followed by a blank line.
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) {
    for (int row = 0; row < nr_rows_A; ++row){
        for (int col = 0; col < nr_cols_A; ++col){
            printf("%f ", A[col * nr_rows_A + row]);
        }
        printf("\n");
    }
    printf("\n");
}
// Print times to a .txt
// Print times to a .txt
// Appends one "iteration-<k>-matrixSize-<s>-matrixName-<name>-time-<sec>.<ms>"
// record to fileName. Exits the process if the file cannot be opened.
void fprint_MemCpy_Times(int matrixSize, int iterationnr, int msec, char *currentMatrix, char *fileName) {
    FILE *f = fopen(fileName, "a");
    if (f == NULL) {
        // FIX: report the file that actually failed instead of the
        // hard-coded "GPUMemCopyTimes.txt".
        printf("an error occured when opening %s\n", fileName);
        printf("Press any key to exit...");
        getchar();
        exit(1);
    }
    fprintf(f, "iteration-%d-", iterationnr);
    fprintf(f, "matrixSize-%d-", matrixSize);
    // FIX: "%d%d" concatenated whole seconds and UNPADDED milliseconds,
    // so e.g. 1005 ms was logged as "15"; use zero-padded "%d.%03d".
    fprintf(f, "matrixName-%s-time-%d.%03d\n", currentMatrix, msec / 1000, msec % 1000);
    fclose(f);
}
// Print times to a .txt
// Print times to a .txt
// Appends one "iteration-<k>-matrixSize-<s>-time-<sec>.<ms>" record to
// fileName. Exits the process if the file cannot be opened.
void fprint_sgemm_time(int matrixSize, int iterationnr, int msec, char *fileName) {
    FILE *f = fopen(fileName, "a");
    if (f == NULL) {
        // FIX: report the file that actually failed instead of the
        // hard-coded "GPUMemCopyTimes.txt".
        printf("an error occured when opening %s\n", fileName);
        printf("Press any key to exit...");
        getchar();
        exit(1);
    }
    fprintf(f, "iteration-%d-", iterationnr);
    fprintf(f, "matrixSize-%d-", matrixSize);
    // FIX: "%d%d" concatenated whole seconds and UNPADDED milliseconds,
    // so e.g. 1005 ms was logged as "15"; use zero-padded "%d.%03d".
    fprintf(f, "time-%d.%03d\n", msec / 1000, msec % 1000);
    fclose(f);
}
// Print a given matrix's entries to a file
// Print a given matrix's entries to a file
// Appends the nr_rows_A x nr_cols_A matrix A (column-major storage) to
// fileName as comma-separated rows, terminated by two newlines. Exits the
// process if the file cannot be opened.
void output_matrix(const float *A, int nr_rows_A, int nr_cols_A, char *fileName) {
    FILE *f = fopen(fileName, "a");
    if (f == NULL) {
        printf("an error occured when opening a file\n");
        printf("Press any key to exit...");
        getchar();
        exit(1);
    }
    for (int row = 0; row < nr_rows_A; ++row){
        for (int col = 0; col < nr_cols_A; ++col){
            fprintf(f, "%f, ", A[col * nr_rows_A + row]);
        }
        fprintf(f, "\n");
    }
    fprintf(f, "\n\n");
    fclose(f);
}
|
efcc5f989fb088a7788339f33bc63fb9a2df917c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <hip/hip_runtime.h>
#include <thrust/iterator/constant_iterator.h>
#include "cupoch/geometry/boundingvolume.h"
#include "cupoch/geometry/graph.h"
#include "cupoch/geometry/lineset.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/geometry/distancetransform.h"
#include "cupoch/geometry/geometry_functor.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
#include "cupoch/visualization/shader/shader.h"
#include "cupoch/visualization/shader/simple_shader.h"
#include "cupoch/visualization/utility/color_map.h"
#include "cupoch/visualization/visualizer/render_option.h"
using namespace cupoch;
using namespace cupoch::visualization;
using namespace cupoch::visualization::glsl;
namespace {
// Vertex indices of 12 lines in a cuboid
__constant__ int cuboid_lines_vertex_indices[12][2] = {
{0, 1}, {0, 2}, {0, 4}, {3, 1}, {3, 2}, {3, 7},
{5, 1}, {5, 4}, {5, 7}, {6, 2}, {6, 4}, {6, 7},
};
struct copy_pointcloud_functor {
copy_pointcloud_functor(bool has_colors,
RenderOption::PointColorOption color_option,
const ViewControl &view)
: has_colors_(has_colors), color_option_(color_option), view_(view){};
const bool has_colors_;
const RenderOption::PointColorOption color_option_;
const ViewControl view_;
const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> &pt_cl) {
const Eigen::Vector3f &point = thrust::get<0>(pt_cl);
const Eigen::Vector3f &color = thrust::get<1>(pt_cl);
Eigen::Vector4f color_tmp;
color_tmp[3] = 1.0;
switch (color_option_) {
case RenderOption::PointColorOption::XCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetXPercentage(point(0)),
colormap_option_);
break;
case RenderOption::PointColorOption::YCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetYPercentage(point(1)),
colormap_option_);
break;
case RenderOption::PointColorOption::ZCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetZPercentage(point(2)),
colormap_option_);
break;
case RenderOption::PointColorOption::Color:
case RenderOption::PointColorOption::Default:
default:
if (has_colors_) {
color_tmp.head<3>() = color;
} else {
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetZPercentage(point(2)),
colormap_option_);
}
break;
}
return thrust::make_tuple(point, color_tmp);
}
};
// Expands precomputed line segments into per-vertex (position, RGBA) pairs.
// Index k enumerates 2 * num_lines output vertices: segment k/2, endpoint
// k%2. Lines without explicit colors are drawn white.
struct copy_lineset_functor {
    copy_lineset_functor(
            const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords,
            const Eigen::Vector3f *line_colors,
            bool has_colors)
        : line_coords_(line_coords),
          line_colors_(line_colors),
          has_colors_(has_colors){};
    const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords_;
    const Eigen::Vector3f *line_colors_;
    const bool has_colors_;
    __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
            size_t k) const {
        const int line_idx = k / 2;
        const bool is_second_endpoint = (k % 2) != 0;
        Eigen::Vector4f vertex_color;
        vertex_color[3] = 1.0;
        vertex_color.head<3>() = (has_colors_) ? line_colors_[line_idx]
                                               : Eigen::Vector3f::Ones();
        const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> &segment =
                line_coords_[line_idx];
        return thrust::make_tuple(
                is_second_endpoint ? segment.second : segment.first,
                vertex_color);
    }
};
// Resolves a line's two vertex indices into their endpoint coordinates.
struct line_coordinates_functor {
    line_coordinates_functor(const Eigen::Vector3f *points) : points_(points){};
    const Eigen::Vector3f *points_;
    __device__ thrust::pair<Eigen::Vector3f, Eigen::Vector3f> operator()(
            const Eigen::Vector2i &idxs) const {
        const Eigen::Vector3f &start = points_[idxs[0]];
        const Eigen::Vector3f &end = points_[idxs[1]];
        return thrust::make_pair(start, end);
    }
};
struct copy_trianglemesh_functor {
copy_trianglemesh_functor(const Eigen::Vector3f *vertices,
const int *triangles,
const Eigen::Vector3f *vertex_colors,
bool has_vertex_colors,
RenderOption::MeshColorOption color_option,
const Eigen::Vector3f &default_mesh_color,
const ViewControl &view)
: vertices_(vertices),
triangles_(triangles),
vertex_colors_(vertex_colors),
has_vertex_colors_(has_vertex_colors),
color_option_(color_option),
default_mesh_color_(default_mesh_color),
view_(view){};
const Eigen::Vector3f *vertices_;
const int *triangles_;
const Eigen::Vector3f *vertex_colors_;
const bool has_vertex_colors_;
const RenderOption::MeshColorOption color_option_;
const Eigen::Vector3f default_mesh_color_;
const ViewControl view_;
const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
size_t k) const {
size_t vi = triangles_[k];
const auto &vertex = vertices_[vi];
Eigen::Vector4f color_tmp;
color_tmp[3] = 1.0;
switch (color_option_) {
case RenderOption::MeshColorOption::XCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetXPercentage(vertex(0)),
colormap_option_);
break;
case RenderOption::MeshColorOption::YCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetYPercentage(vertex(1)),
colormap_option_);
break;
case RenderOption::MeshColorOption::ZCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetZPercentage(vertex(2)),
colormap_option_);
break;
case RenderOption::MeshColorOption::Color:
if (has_vertex_colors_) {
color_tmp.head<3>() = vertex_colors_[vi];
break;
}
case RenderOption::MeshColorOption::Default:
default:
color_tmp.head<3>() = default_mesh_color_;
break;
}
return thrust::make_tuple(vertex, color_tmp);
}
};
struct copy_voxelgrid_line_functor {
copy_voxelgrid_line_functor(const Eigen::Vector3f *vertices,
const geometry::Voxel *voxels,
bool has_colors,
RenderOption::MeshColorOption color_option,
const Eigen::Vector3f &default_mesh_color,
const ViewControl &view)
: vertices_(vertices),
voxels_(voxels),
has_colors_(has_colors),
color_option_(color_option),
default_mesh_color_(default_mesh_color),
view_(view){};
const Eigen::Vector3f *vertices_;
const geometry::Voxel *voxels_;
const bool has_colors_;
const RenderOption::MeshColorOption color_option_;
const Eigen::Vector3f default_mesh_color_;
const ViewControl view_;
const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
size_t idx) const {
int i = idx / (12 * 2);
int jk = idx % (12 * 2);
int j = jk / 2;
int k = jk % 2;
// Voxel color (applied to all points)
Eigen::Vector4f voxel_color;
voxel_color[3] = 1.0;
switch (color_option_) {
case RenderOption::MeshColorOption::XCoordinate:
voxel_color.head<3>() =
GetColorMapColor(view_.GetBoundingBox().GetXPercentage(
vertices_[i * 8](0)),
colormap_option_);
break;
case RenderOption::MeshColorOption::YCoordinate:
voxel_color.head<3>() =
GetColorMapColor(view_.GetBoundingBox().GetYPercentage(
vertices_[i * 8](1)),
colormap_option_);
break;
case RenderOption::MeshColorOption::ZCoordinate:
voxel_color.head<3>() =
GetColorMapColor(view_.GetBoundingBox().GetZPercentage(
vertices_[i * 8](2)),
colormap_option_);
break;
case RenderOption::MeshColorOption::Color:
if (has_colors_) {
voxel_color.head<3>() = voxels_[i].color_;
break;
}
case RenderOption::MeshColorOption::Default:
default:
voxel_color.head<3>() = default_mesh_color_;
break;
}
return thrust::make_tuple(
vertices_[i * 8 + cuboid_lines_vertex_indices[j][k]],
voxel_color);
}
};
// Converts one (linear index, DistanceVoxel) pair into a (position, RGBA)
// vertex. Voxels render white; the alpha channel fades linearly with the
// stored distance, reaching 0 at distance_max_.
struct copy_distance_voxel_functor {
    copy_distance_voxel_functor(float voxel_size,
                                int resolution,
                                const Eigen::Vector3f& origin,
                                float distance_max)
        : voxel_size_(voxel_size), resolution_(resolution),
          origin_(origin), distance_max_(distance_max){};
    const float voxel_size_;    // edge length of one voxel
    const int resolution_;      // voxels per axis (dense resolution_^3 grid)
    const Eigen::Vector3f origin_;
    const float distance_max_;  // distance at which alpha becomes 0
    __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>
    operator()(const thrust::tuple<size_t, geometry::DistanceVoxel>& kv) const {
        int idx = thrust::get<0>(kv);
        geometry::DistanceVoxel v = thrust::get<1>(kv);
        // Decompose the x-major linear index: idx = x*res^2 + y*res + z.
        int res2 = resolution_ * resolution_;
        int x = idx / res2;
        int yz = idx % res2;
        int y = yz / resolution_;
        int z = yz % resolution_;
        // Voxel color (applied to all points)
        Eigen::Vector4f voxel_color = Eigen::Vector4f::Ones();
        // Re-center the grid around zero and add half a voxel to land on the
        // voxel center. NOTE(review): origin_ is SUBTRACTED here while other
        // voxel code in this file adds the grid origin -- confirm the
        // intended sign convention for DistanceTransform.
        int h_res = resolution_ / 2;
        Eigen::Vector3f pt = (Eigen::Vector3i(x - h_res, y - h_res, z - h_res).cast<float>() + Eigen::Vector3f::Constant(0.5)) * voxel_size_ - origin_;
        // Linear falloff: opaque at distance 0, transparent at distance_max_.
        voxel_color[3] = 1.0 - min(v.distance_, distance_max_) / distance_max_;
        return thrust::make_tuple(pt, voxel_color);
    }
};
// Strict-weak ordering on vertex alpha, descending: more opaque vertices
// sort (and therefore draw) first under alpha blending.
struct alpha_greater_functor {
    __device__ bool operator() (const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& lhs,
                                const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& rhs) const {
        const float lhs_alpha = thrust::get<1>(lhs)[3];
        const float rhs_alpha = thrust::get<1>(rhs)[3];
        return lhs_alpha > rhs_alpha;
    }
};
} // namespace
// Compiles and links the simple vertex/fragment shader pair (no geometry
// shader) and caches the attribute/uniform locations used at render time.
// Returns false (with a warning) if shader compilation fails.
bool SimpleShader::Compile() {
    if (CompileShaders(simple_vertex_shader, NULL, simple_fragment_shader) ==
        false) {
        PrintShaderWarning("Compiling shaders failed.");
        return false;
    }
    vertex_position_ = glGetAttribLocation(program_, "vertex_position");
    vertex_color_ = glGetAttribLocation(program_, "vertex_color");
    MVP_ = glGetUniformLocation(program_, "MVP");
    return true;
}
void SimpleShader::Release() {
UnbindGeometry(true);
ReleaseProgram();
}
// Allocates the position/color GL buffers, registers them with the GPU
// compute runtime, maps them, and lets the concrete shader fill them via
// PrepareBinding(). Returns false (with a warning) if data preparation
// fails; the buffers are unmapped in both the success and failure paths.
bool SimpleShader::BindGeometry(const geometry::Geometry &geometry,
                                const RenderOption &option,
                                const ViewControl &view) {
    // If there is already geometry, we first unbind it.
    // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and
    // rebind the geometry. Note that this approach is slow. If the geometry is
    // changing per frame, consider implementing a new ShaderWrapper using
    // GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object
    // Streaming mechanisms.
    UnbindGeometry();
    // Prepare data to be passed to GPU
    const size_t num_data_size = GetDataSize(geometry);
    // Create buffers and bind the geometry
    glGenBuffers(1, &vertex_position_buffer_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
    glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0,
                 GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0],
                                             vertex_position_buffer_,
                                             hipGraphicsMapFlagsNone));
    glGenBuffers(1, &vertex_color_buffer_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_);
    glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector4f), 0,
                 GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1],
                                             vertex_color_buffer_,
                                             hipGraphicsMapFlagsNone));
    // Map both registered buffers so device code can write into them.
    Eigen::Vector3f *raw_points_ptr;
    Eigen::Vector4f *raw_colors_ptr;
    size_t n_bytes;
    cudaSafeCall(hipGraphicsMapResources(2, cuda_graphics_resources_));
    cudaSafeCall(hipGraphicsResourceGetMappedPointer(
            (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0]));
    cudaSafeCall(hipGraphicsResourceGetMappedPointer(
            (void **)&raw_colors_ptr, &n_bytes, cuda_graphics_resources_[1]));
    thrust::device_ptr<Eigen::Vector3f> dev_points_ptr =
            thrust::device_pointer_cast(raw_points_ptr);
    thrust::device_ptr<Eigen::Vector4f> dev_colors_ptr =
            thrust::device_pointer_cast(raw_colors_ptr);
    if (PrepareBinding(geometry, option, view, dev_points_ptr,
                       dev_colors_ptr) == false) {
        PrintShaderWarning("Binding failed when preparing data.");
        // BUGFIX: unmap before bailing out. The early return previously left
        // both graphics resources mapped, leaking the mapping and blocking
        // GL from using the buffers afterwards.
        Unmap(2);
        return false;
    }
    Unmap(2);
    bound_ = true;
    return true;
}
// Issues the draw call for geometry previously bound with BindGeometry():
// uploads the MVP matrix, wires both vertex buffers to their shader
// attributes, draws, then disables the attribute arrays again.
bool SimpleShader::RenderGeometry(const geometry::Geometry &geometry,
                                  const RenderOption &option,
                                  const ViewControl &view) {
    if (PrepareRendering(geometry, option, view) == false) {
        PrintShaderWarning("Rendering failed during preparation.");
        return false;
    }
    glUseProgram(program_);
    glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data());
    glEnableVertexAttribArray(vertex_position_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
    glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
    glEnableVertexAttribArray(vertex_color_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_);
    glVertexAttribPointer(vertex_color_, 4, GL_FLOAT, GL_FALSE, 0, NULL);
    // Mode (points/lines/triangles) and count were fixed by the concrete
    // shader's PrepareBinding().
    glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_);
    glDisableVertexAttribArray(vertex_position_);
    glDisableVertexAttribArray(vertex_color_);
    return true;
}
// Deletes the GL buffers created in BindGeometry(). The compute-runtime
// graphics resources are unregistered first unless finalize is set —
// presumably because the contexts are being torn down at shutdown and
// unregistering would then be invalid; confirm against the callers.
void SimpleShader::UnbindGeometry(bool finalize) {
    if (bound_) {
        if (!finalize) {
            cudaSafeCall(hipGraphicsUnregisterResource(
                    cuda_graphics_resources_[0]));
            cudaSafeCall(hipGraphicsUnregisterResource(
                    cuda_graphics_resources_[1]));
        }
        glDeleteBuffers(1, &vertex_position_buffer_);
        glDeleteBuffers(1, &vertex_color_buffer_);
        bound_ = false;
    }
}
// Configures GL point-rendering state; rejects non-PointCloud geometry.
bool SimpleShaderForPointCloud::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    const bool is_pointcloud =
            geometry.GetGeometryType() ==
            geometry::Geometry::GeometryType::PointCloud;
    if (!is_pointcloud) {
        PrintShaderWarning("Rendering type is not geometry::PointCloud.");
        return false;
    }
    glPointSize(GLfloat(option.point_size_));
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GLenum(option.GetGLDepthFunc()));
    return true;
}
bool SimpleShaderForPointCloud::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::PointCloud) {
PrintShaderWarning("Rendering type is not geometry::PointCloud.");
return false;
}
const geometry::PointCloud &pointcloud =
(const geometry::PointCloud &)geometry;
if (pointcloud.HasPoints() == false) {
PrintShaderWarning("Binding failed with empty pointcloud.");
return false;
}
copy_pointcloud_functor func(pointcloud.HasColors(),
option.point_color_option_, view);
if (pointcloud.HasColors()) {
thrust::transform(
make_tuple_begin(pointcloud.points_, pointcloud.colors_),
make_tuple_end(pointcloud.points_, pointcloud.colors_),
make_tuple_iterator(points, colors), func);
} else {
thrust::transform(
make_tuple_iterator(pointcloud.points_.begin(),
thrust::constant_iterator<Eigen::Vector3f>(
Eigen::Vector3f::Zero())),
make_tuple_iterator(pointcloud.points_.end(),
thrust::constant_iterator<Eigen::Vector3f>(
Eigen::Vector3f::Zero())),
make_tuple_iterator(points, colors), func);
}
draw_arrays_mode_ = GL_POINTS;
draw_arrays_size_ = GLsizei(pointcloud.points_.size());
return true;
}
// One GL vertex per point in the cloud.
size_t SimpleShaderForPointCloud::GetDataSize(
        const geometry::Geometry &geometry) const {
    const geometry::PointCloud &pointcloud =
            (const geometry::PointCloud &)geometry;
    return pointcloud.points_.size();
}
bool SimpleShaderForLineSet::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::LineSet) {
PrintShaderWarning("Rendering type is not geometry::LineSet.");
return false;
}
glLineWidth(GLfloat(option.line_width_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleShaderForLineSet::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::LineSet) {
PrintShaderWarning("Rendering type is not geometry::LineSet.");
return false;
}
const geometry::LineSet<3> &lineset =
(const geometry::LineSet<3> &)geometry;
if (lineset.HasLines() == false) {
PrintShaderWarning("Binding failed with empty geometry::LineSet.");
return false;
}
utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>>
line_coords(lineset.lines_.size());
line_coordinates_functor func_line(
thrust::raw_pointer_cast(lineset.points_.data()));
thrust::transform(lineset.lines_.begin(), lineset.lines_.end(),
line_coords.begin(), func_line);
copy_lineset_functor func_cp(
thrust::raw_pointer_cast(line_coords.data()),
thrust::raw_pointer_cast(lineset.colors_.data()),
lineset.HasColors());
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(lineset.lines_.size() * 2),
make_tuple_iterator(points, colors), func_cp);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(lineset.lines_.size() * 2);
return true;
}
size_t SimpleShaderForLineSet::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::LineSet<3> &)geometry).lines_.size() * 2;
}
bool SimpleShaderForGraphNode::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
PrintShaderWarning("Rendering type is not geometry::Graph.");
return false;
}
glPointSize(GLfloat(option.point_size_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
// Fills the mapped GL buffers with one vertex per graph node. Nodes are
// colored from node_colors_ when present, otherwise from the color map /
// constant fallback inside copy_pointcloud_functor.
bool SimpleShaderForGraphNode::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
        PrintShaderWarning("Rendering type is not geometry::Graph.");
        return false;
    }
    const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry;
    if (graph.HasPoints() == false) {
        PrintShaderWarning("Binding failed with empty graph.");
        return false;
    }
    // BUGFIX: the functor's has_colors flag was built from graph.HasColors()
    // (the edge-color predicate) while the color stream actually bound below
    // is node_colors_, selected with HasNodeColors(). Use the node-color
    // predicate for both so the flag matches the data fed to the functor.
    copy_pointcloud_functor func(graph.HasNodeColors(),
                                 option.point_color_option_, view);
    if (graph.HasNodeColors()) {
        thrust::transform(make_tuple_begin(graph.points_, graph.node_colors_),
                          make_tuple_end(graph.points_, graph.node_colors_),
                          make_tuple_iterator(points, colors), func);
    } else {
        // No node colors: feed a constant placeholder color; the functor
        // only uses it when its has_colors flag is true.
        thrust::transform(
                make_tuple_iterator(graph.points_.begin(),
                                    thrust::constant_iterator<Eigen::Vector3f>(
                                            Eigen::Vector3f::Ones())),
                make_tuple_iterator(graph.points_.end(),
                                    thrust::constant_iterator<Eigen::Vector3f>(
                                            Eigen::Vector3f::Ones())),
                make_tuple_iterator(points, colors), func);
    }
    draw_arrays_mode_ = GL_POINTS;
    draw_arrays_size_ = GLsizei(graph.points_.size());
    return true;
}
size_t SimpleShaderForGraphNode::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::Graph<3> &)geometry).points_.size();
}
bool SimpleShaderForGraphEdge::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
PrintShaderWarning("Rendering type is not geometry::Graph.");
return false;
}
glLineWidth(GLfloat(option.line_width_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleShaderForGraphEdge::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
PrintShaderWarning("Rendering type is not geometry::Graph.");
return false;
}
const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry;
if (graph.HasLines() == false) {
PrintShaderWarning("Binding failed with empty geometry::Graph.");
return false;
}
utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>>
line_coords(graph.lines_.size());
line_coordinates_functor func_line(
thrust::raw_pointer_cast(graph.points_.data()));
thrust::transform(graph.lines_.begin(), graph.lines_.end(),
line_coords.begin(), func_line);
copy_lineset_functor func_cp(thrust::raw_pointer_cast(line_coords.data()),
thrust::raw_pointer_cast(graph.colors_.data()),
graph.HasColors());
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(graph.lines_.size() * 2),
make_tuple_iterator(points, colors), func_cp);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(graph.lines_.size() * 2);
return true;
}
size_t SimpleShaderForGraphEdge::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::Graph<3> &)geometry).lines_.size() * 2;
}
bool SimpleShaderForAxisAlignedBoundingBox::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::AxisAlignedBoundingBox) {
PrintShaderWarning(
"Rendering type is not geometry::AxisAlignedBoundingBox.");
return false;
}
glLineWidth(GLfloat(option.line_width_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleShaderForAxisAlignedBoundingBox::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::AxisAlignedBoundingBox) {
PrintShaderWarning(
"Rendering type is not geometry::AxisAlignedBoundingBox.");
return false;
}
auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox(
(const geometry::AxisAlignedBoundingBox &)geometry);
utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>>
line_coords(lineset->lines_.size());
line_coordinates_functor func_line(
thrust::raw_pointer_cast(lineset->points_.data()));
thrust::transform(lineset->lines_.begin(), lineset->lines_.end(),
line_coords.begin(), func_line);
copy_lineset_functor func_cp(
thrust::raw_pointer_cast(line_coords.data()),
thrust::raw_pointer_cast(lineset->colors_.data()),
lineset->HasColors());
thrust::transform(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(lineset->lines_.size() * 2),
make_tuple_iterator(points, colors), func_cp);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(lineset->lines_.size() * 2);
return true;
}
size_t SimpleShaderForAxisAlignedBoundingBox::GetDataSize(
const geometry::Geometry &geometry) const {
auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox(
(const geometry::AxisAlignedBoundingBox &)geometry);
return lineset->lines_.size() * 2;
}
bool SimpleShaderForTriangleMesh::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
if (option.mesh_show_back_face_) {
glDisable(GL_CULL_FACE);
} else {
glEnable(GL_CULL_FACE);
}
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
if (option.mesh_show_wireframe_) {
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(1.0, 1.0);
} else {
glDisable(GL_POLYGON_OFFSET_FILL);
}
return true;
}
bool SimpleShaderForTriangleMesh::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
const geometry::TriangleMesh &mesh =
(const geometry::TriangleMesh &)geometry;
if (mesh.HasTriangles() == false) {
PrintShaderWarning("Binding failed with empty triangle mesh.");
return false;
}
copy_trianglemesh_functor func(
thrust::raw_pointer_cast(mesh.vertices_.data()),
(int *)(thrust::raw_pointer_cast(mesh.triangles_.data())),
thrust::raw_pointer_cast(mesh.vertex_colors_.data()),
mesh.HasVertexColors(), option.mesh_color_option_,
option.default_mesh_color_, view);
thrust::transform(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(mesh.triangles_.size() * 3),
make_tuple_iterator(points, colors), func);
draw_arrays_mode_ = GL_TRIANGLES;
draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3);
return true;
}
size_t SimpleShaderForTriangleMesh::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3;
}
bool SimpleShaderForVoxelGridLine::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::VoxelGrid) {
PrintShaderWarning("Rendering type is not geometry::VoxelGrid.");
return false;
}
glDisable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleShaderForVoxelGridLine::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::VoxelGrid) {
PrintShaderWarning("Rendering type is not geometry::VoxelGrid.");
return false;
}
const geometry::VoxelGrid &voxel_grid =
(const geometry::VoxelGrid &)geometry;
if (voxel_grid.HasVoxels() == false) {
PrintShaderWarning("Binding failed with empty voxel grid.");
return false;
}
utility::device_vector<Eigen::Vector3f> vertices(
voxel_grid.voxels_values_.size() * 8);
thrust::tiled_range<
thrust::counting_iterator<size_t>>
irange(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(8),
voxel_grid.voxels_values_.size());
auto gfunc = geometry::get_grid_index_functor<geometry::Voxel, Eigen::Vector3i>();
auto begin = thrust::make_transform_iterator(voxel_grid.voxels_values_.begin(), gfunc);
thrust::repeated_range<decltype(begin)>
vrange(begin, thrust::make_transform_iterator(voxel_grid.voxels_values_.end(), gfunc), 8);
geometry::compute_voxel_vertices_functor<Eigen::Vector3i> func1(voxel_grid.origin_, voxel_grid.voxel_size_);
thrust::transform(make_tuple_begin(irange, vrange), make_tuple_end(irange, vrange),
vertices.begin(), func1);
size_t n_out = voxel_grid.voxels_values_.size() * 12 * 2;
copy_voxelgrid_line_functor func2(
thrust::raw_pointer_cast(vertices.data()),
thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()),
voxel_grid.HasColors(), option.mesh_color_option_,
option.default_mesh_color_, view);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_out),
make_tuple_iterator(points, colors), func2);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(n_out);
return true;
}
size_t SimpleShaderForVoxelGridLine::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 *
2;
}
// Configures GL state for rendering a distance transform as blended points.
// Alpha blending is enabled because copy_distance_voxel_functor encodes the
// voxel distance in the alpha channel.
bool SimpleShaderForDistanceTransform::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::DistanceTransform) {
        PrintShaderWarning("Rendering type is not geometry::DistanceTransform.");
        return false;
    }
    glPointSize(GLfloat(option.point_size_));
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GLenum(option.GetGLDepthFunc()));
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    return true;
}
// Fills the mapped GL buffers with one vertex per voxel of the distance
// transform, then sorts them by descending alpha so opaque points draw
// first under the blending set up in PrepareRendering().
bool SimpleShaderForDistanceTransform::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::DistanceTransform) {
        PrintShaderWarning("Rendering type is not geometry::DistanceTransform.");
        return false;
    }
    const geometry::DistanceTransform &dist_trans =
            (const geometry::DistanceTransform &)geometry;
    if (dist_trans.IsEmpty()) {
        PrintShaderWarning("Binding failed with empty distance transform.");
        return false;
    }
    size_t n_out = dist_trans.voxels_.size();
    // Voxels fade to fully transparent over 10% of the grid's side length.
    copy_distance_voxel_functor
     func(dist_trans.voxel_size_, dist_trans.resolution_, dist_trans.origin_,
          dist_trans.voxel_size_ * dist_trans.resolution_ * 0.1);
    // Each voxel maps to (position from linear index, alpha from distance).
    thrust::transform(make_tuple_iterator(thrust::make_counting_iterator<size_t>(0), dist_trans.voxels_.begin()),
                      make_tuple_iterator(thrust::make_counting_iterator(n_out), dist_trans.voxels_.end()),
                      make_tuple_iterator(points, colors), func);
    auto tp_begin = make_tuple_iterator(points, colors);
    // In-place sort of the mapped GL buffers by descending alpha.
    thrust::sort(utility::exec_policy(0)->on(0),
                 tp_begin, tp_begin + n_out, alpha_greater_functor());
    draw_arrays_mode_ = GL_POINTS;
    draw_arrays_size_ = GLsizei(n_out);
    return true;
}
// The GL buffers are sized for the full dense grid: resolution^3 voxels.
size_t SimpleShaderForDistanceTransform::GetDataSize(
        const geometry::Geometry &geometry) const {
    int res = ((const geometry::DistanceTransform &)geometry).resolution_;
    return res * res * res;
}
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <cuda_runtime.h>
#include <thrust/iterator/constant_iterator.h>
#include "cupoch/geometry/boundingvolume.h"
#include "cupoch/geometry/graph.h"
#include "cupoch/geometry/lineset.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/geometry/distancetransform.h"
#include "cupoch/geometry/geometry_functor.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
#include "cupoch/visualization/shader/shader.h"
#include "cupoch/visualization/shader/simple_shader.h"
#include "cupoch/visualization/utility/color_map.h"
#include "cupoch/visualization/visualizer/render_option.h"
using namespace cupoch;
using namespace cupoch::visualization;
using namespace cupoch::visualization::glsl;
namespace {
// Vertex indices of 12 lines in a cuboid
__constant__ int cuboid_lines_vertex_indices[12][2] = {
{0, 1}, {0, 2}, {0, 4}, {3, 1}, {3, 2}, {3, 7},
{5, 1}, {5, 4}, {5, 7}, {6, 2}, {6, 4}, {6, 7},
};
struct copy_pointcloud_functor {
copy_pointcloud_functor(bool has_colors,
RenderOption::PointColorOption color_option,
const ViewControl &view)
: has_colors_(has_colors), color_option_(color_option), view_(view){};
const bool has_colors_;
const RenderOption::PointColorOption color_option_;
const ViewControl view_;
const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> &pt_cl) {
const Eigen::Vector3f &point = thrust::get<0>(pt_cl);
const Eigen::Vector3f &color = thrust::get<1>(pt_cl);
Eigen::Vector4f color_tmp;
color_tmp[3] = 1.0;
switch (color_option_) {
case RenderOption::PointColorOption::XCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetXPercentage(point(0)),
colormap_option_);
break;
case RenderOption::PointColorOption::YCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetYPercentage(point(1)),
colormap_option_);
break;
case RenderOption::PointColorOption::ZCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetZPercentage(point(2)),
colormap_option_);
break;
case RenderOption::PointColorOption::Color:
case RenderOption::PointColorOption::Default:
default:
if (has_colors_) {
color_tmp.head<3>() = color;
} else {
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetZPercentage(point(2)),
colormap_option_);
}
break;
}
return thrust::make_tuple(point, color_tmp);
}
};
struct copy_lineset_functor {
copy_lineset_functor(
const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords,
const Eigen::Vector3f *line_colors,
bool has_colors)
: line_coords_(line_coords),
line_colors_(line_colors),
has_colors_(has_colors){};
const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords_;
const Eigen::Vector3f *line_colors_;
const bool has_colors_;
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
size_t k) const {
int i = k / 2;
int j = k % 2;
Eigen::Vector4f color_tmp;
color_tmp[3] = 1.0;
color_tmp.head<3>() =
(has_colors_) ? line_colors_[i] : Eigen::Vector3f::Ones();
if (j == 0) {
return thrust::make_tuple(line_coords_[i].first, color_tmp);
} else {
return thrust::make_tuple(line_coords_[i].second, color_tmp);
}
}
};
struct line_coordinates_functor {
line_coordinates_functor(const Eigen::Vector3f *points) : points_(points){};
const Eigen::Vector3f *points_;
__device__ thrust::pair<Eigen::Vector3f, Eigen::Vector3f> operator()(
const Eigen::Vector2i &idxs) const {
return thrust::make_pair(points_[idxs[0]], points_[idxs[1]]);
}
};
struct copy_trianglemesh_functor {
copy_trianglemesh_functor(const Eigen::Vector3f *vertices,
const int *triangles,
const Eigen::Vector3f *vertex_colors,
bool has_vertex_colors,
RenderOption::MeshColorOption color_option,
const Eigen::Vector3f &default_mesh_color,
const ViewControl &view)
: vertices_(vertices),
triangles_(triangles),
vertex_colors_(vertex_colors),
has_vertex_colors_(has_vertex_colors),
color_option_(color_option),
default_mesh_color_(default_mesh_color),
view_(view){};
const Eigen::Vector3f *vertices_;
const int *triangles_;
const Eigen::Vector3f *vertex_colors_;
const bool has_vertex_colors_;
const RenderOption::MeshColorOption color_option_;
const Eigen::Vector3f default_mesh_color_;
const ViewControl view_;
const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
size_t k) const {
size_t vi = triangles_[k];
const auto &vertex = vertices_[vi];
Eigen::Vector4f color_tmp;
color_tmp[3] = 1.0;
switch (color_option_) {
case RenderOption::MeshColorOption::XCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetXPercentage(vertex(0)),
colormap_option_);
break;
case RenderOption::MeshColorOption::YCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetYPercentage(vertex(1)),
colormap_option_);
break;
case RenderOption::MeshColorOption::ZCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetZPercentage(vertex(2)),
colormap_option_);
break;
case RenderOption::MeshColorOption::Color:
if (has_vertex_colors_) {
color_tmp.head<3>() = vertex_colors_[vi];
break;
}
case RenderOption::MeshColorOption::Default:
default:
color_tmp.head<3>() = default_mesh_color_;
break;
}
return thrust::make_tuple(vertex, color_tmp);
}
};
// Emits the two endpoints of each voxel wireframe edge together with a
// per-voxel RGBA color. Index layout: idx = i * 24 + j * 2 + k, with i the
// voxel, j in [0,12) the cuboid edge and k in {0,1} the endpoint.
struct copy_voxelgrid_line_functor {
    copy_voxelgrid_line_functor(const Eigen::Vector3f *vertices,
                                const geometry::Voxel *voxels,
                                bool has_colors,
                                RenderOption::MeshColorOption color_option,
                                const Eigen::Vector3f &default_mesh_color,
                                const ViewControl &view)
        : vertices_(vertices),
          voxels_(voxels),
          has_colors_(has_colors),
          color_option_(color_option),
          default_mesh_color_(default_mesh_color),
          view_(view){};
    const Eigen::Vector3f *vertices_;  // 8 corner vertices per voxel
    const geometry::Voxel *voxels_;
    const bool has_colors_;
    const RenderOption::MeshColorOption color_option_;
    const Eigen::Vector3f default_mesh_color_;
    const ViewControl view_;
    const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
    __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
            size_t idx) const {
        int i = idx / (12 * 2);  // voxel index
        int jk = idx % (12 * 2);
        int j = jk / 2;  // edge index within the cuboid
        int k = jk % 2;  // endpoint (0 or 1) of the edge
        // Voxel color (applied to all points)
        Eigen::Vector4f voxel_color;
        voxel_color[3] = 1.0;
        switch (color_option_) {
            case RenderOption::MeshColorOption::XCoordinate:
                // colormap keyed on the voxel's first corner vertex
                voxel_color.head<3>() =
                        GetColorMapColor(view_.GetBoundingBox().GetXPercentage(
                                                 vertices_[i * 8](0)),
                                         colormap_option_);
                break;
            case RenderOption::MeshColorOption::YCoordinate:
                voxel_color.head<3>() =
                        GetColorMapColor(view_.GetBoundingBox().GetYPercentage(
                                                 vertices_[i * 8](1)),
                                         colormap_option_);
                break;
            case RenderOption::MeshColorOption::ZCoordinate:
                voxel_color.head<3>() =
                        GetColorMapColor(view_.GetBoundingBox().GetZPercentage(
                                                 vertices_[i * 8](2)),
                                         colormap_option_);
                break;
            case RenderOption::MeshColorOption::Color:
                if (has_colors_) {
                    voxel_color.head<3>() = voxels_[i].color_;
                    break;
                }
                // intentional fallthrough when the grid carries no colors
            case RenderOption::MeshColorOption::Default:
            default:
                voxel_color.head<3>() = default_mesh_color_;
                break;
        }
        return thrust::make_tuple(
                vertices_[i * 8 + cuboid_lines_vertex_indices[j][k]],
                voxel_color);
    }
};
// Converts a (linear index, DistanceVoxel) pair into a point whose alpha
// fades with stored distance: alpha = 1 at distance 0, alpha = 0 at
// distance_max_ and beyond.
struct copy_distance_voxel_functor {
    copy_distance_voxel_functor(float voxel_size,
                                int resolution,
                                const Eigen::Vector3f& origin,
                                float distance_max)
        : voxel_size_(voxel_size), resolution_(resolution),
          origin_(origin), distance_max_(distance_max){};
    const float voxel_size_;
    const int resolution_;
    const Eigen::Vector3f origin_;
    const float distance_max_;
    __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>
    operator()(const thrust::tuple<size_t, geometry::DistanceVoxel>& kv) const {
        int idx = thrust::get<0>(kv);
        geometry::DistanceVoxel v = thrust::get<1>(kv);
        // Unflatten idx into (x, y, z) grid coordinates, x-major layout.
        int res2 = resolution_ * resolution_;
        int x = idx / res2;
        int yz = idx % res2;
        int y = yz / resolution_;
        int z = yz % resolution_;
        // Voxel color (applied to all points)
        Eigen::Vector4f voxel_color = Eigen::Vector4f::Ones();
        int h_res = resolution_ / 2;
        // Voxel center in world space; the grid is centered via h_res.
        // NOTE(review): origin_ is subtracted here while voxel grids usually
        // add the origin offset -- confirm against DistanceTransform's
        // coordinate convention.
        Eigen::Vector3f pt = (Eigen::Vector3i(x - h_res, y - h_res, z - h_res).cast<float>() + Eigen::Vector3f::Constant(0.5)) * voxel_size_ - origin_;
        voxel_color[3] = 1.0 - min(v.distance_, distance_max_) / distance_max_;
        return thrust::make_tuple(pt, voxel_color);
    }
};
// Orders (point, RGBA) tuples by descending alpha so the most opaque
// points sort to the front of the draw buffer.
struct alpha_greater_functor {
    __device__ bool operator()(
            const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& lhs,
            const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& rhs) const {
        const float alpha_l = thrust::get<1>(lhs)[3];
        const float alpha_r = thrust::get<1>(rhs)[3];
        return alpha_l > alpha_r;
    }
};
} // namespace
// Compiles the simple vertex/fragment shader pair and caches the attribute
// and uniform locations used by RenderGeometry. Returns false on failure.
bool SimpleShader::Compile() {
    if (!CompileShaders(simple_vertex_shader, NULL, simple_fragment_shader)) {
        PrintShaderWarning("Compiling shaders failed.");
        return false;
    }
    vertex_position_ = glGetAttribLocation(program_, "vertex_position");
    vertex_color_ = glGetAttribLocation(program_, "vertex_color");
    MVP_ = glGetUniformLocation(program_, "MVP");
    return true;
}
// Releases GPU-side resources; UnbindGeometry(true) takes the finalize
// path (skipping the CUDA unregister step) before the program is freed.
void SimpleShader::Release() {
    UnbindGeometry(true);
    ReleaseProgram();
}
// Creates two GL buffers (positions vec3, colors vec4) sized by
// GetDataSize(), registers them for CUDA interop, maps them, and lets
// PrepareBinding() fill them on the device through thrust pointers.
// Returns false (with a warning) if the subclass fails to prepare data.
bool SimpleShader::BindGeometry(const geometry::Geometry &geometry,
                                const RenderOption &option,
                                const ViewControl &view) {
    // If there is already geometry, we first unbind it.
    // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and
    // rebind the geometry. Note that this approach is slow. If the geometry is
    // changing per frame, consider implementing a new ShaderWrapper using
    // GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object
    // Streaming mechanisms.
    UnbindGeometry();
    // Prepare data to be passed to GPU
    const size_t num_data_size = GetDataSize(geometry);
    // Create buffers and bind the geometry
    glGenBuffers(1, &vertex_position_buffer_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
    glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0,
                 GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0],
                                              vertex_position_buffer_,
                                              cudaGraphicsMapFlagsNone));
    glGenBuffers(1, &vertex_color_buffer_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_);
    glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector4f), 0,
                 GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1],
                                              vertex_color_buffer_,
                                              cudaGraphicsMapFlagsNone));
    Eigen::Vector3f *raw_points_ptr;
    Eigen::Vector4f *raw_colors_ptr;
    size_t n_bytes;
    cudaSafeCall(cudaGraphicsMapResources(2, cuda_graphics_resources_));
    cudaSafeCall(cudaGraphicsResourceGetMappedPointer(
            (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0]));
    cudaSafeCall(cudaGraphicsResourceGetMappedPointer(
            (void **)&raw_colors_ptr, &n_bytes, cuda_graphics_resources_[1]));
    thrust::device_ptr<Eigen::Vector3f> dev_points_ptr =
            thrust::device_pointer_cast(raw_points_ptr);
    thrust::device_ptr<Eigen::Vector4f> dev_colors_ptr =
            thrust::device_pointer_cast(raw_colors_ptr);
    if (PrepareBinding(geometry, option, view, dev_points_ptr,
                       dev_colors_ptr) == false) {
        PrintShaderWarning("Binding failed when preparing data.");
        // BUGFIX: the two graphics resources were mapped above; unmap them
        // on the error path too, otherwise they remain mapped and later
        // bind/render calls fail.
        Unmap(2);
        return false;
    }
    Unmap(2);
    bound_ = true;
    return true;
}
// Draws the currently bound buffers: uploads the MVP matrix, wires the
// position (vec3) and color (vec4) attributes and issues one glDrawArrays
// call with the mode/count set by PrepareBinding.
bool SimpleShader::RenderGeometry(const geometry::Geometry &geometry,
                                  const RenderOption &option,
                                  const ViewControl &view) {
    if (PrepareRendering(geometry, option, view) == false) {
        PrintShaderWarning("Rendering failed during preparation.");
        return false;
    }
    glUseProgram(program_);
    glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data());
    glEnableVertexAttribArray(vertex_position_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
    glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
    glEnableVertexAttribArray(vertex_color_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_);
    glVertexAttribPointer(vertex_color_, 4, GL_FLOAT, GL_FALSE, 0, NULL);
    glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_);
    glDisableVertexAttribArray(vertex_position_);
    glDisableVertexAttribArray(vertex_color_);
    return true;
}
// Tears down the GL buffers and (unless finalizing) the CUDA interop
// registrations created in BindGeometry.
// NOTE(review): with finalize == true the CUDA resources are NOT
// unregistered before the buffers are deleted -- presumably because the
// CUDA/GL context is already being destroyed on that path; confirm.
void SimpleShader::UnbindGeometry(bool finalize) {
    if (bound_) {
        if (!finalize) {
            cudaSafeCall(cudaGraphicsUnregisterResource(
                    cuda_graphics_resources_[0]));
            cudaSafeCall(cudaGraphicsUnregisterResource(
                    cuda_graphics_resources_[1]));
        }
        glDeleteBuffers(1, &vertex_position_buffer_);
        glDeleteBuffers(1, &vertex_color_buffer_);
        bound_ = false;
    }
}
// Sets point size and depth state for point-cloud rendering.
bool SimpleShaderForPointCloud::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    if (geometry.GetGeometryType() ==
        geometry::Geometry::GeometryType::PointCloud) {
        glPointSize(GLfloat(option.point_size_));
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GLenum(option.GetGLDepthFunc()));
        return true;
    }
    PrintShaderWarning("Rendering type is not geometry::PointCloud.");
    return false;
}
// Fills the mapped device buffers with one (point, color) pair per point.
// When the cloud has no colors, a constant zero vector is streamed in as
// the color input; the functor then chooses the final color according to
// option.point_color_option_.
bool SimpleShaderForPointCloud::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::PointCloud) {
        PrintShaderWarning("Rendering type is not geometry::PointCloud.");
        return false;
    }
    const geometry::PointCloud &pointcloud =
            (const geometry::PointCloud &)geometry;
    if (pointcloud.HasPoints() == false) {
        PrintShaderWarning("Binding failed with empty pointcloud.");
        return false;
    }
    copy_pointcloud_functor func(pointcloud.HasColors(),
                                 option.point_color_option_, view);
    if (pointcloud.HasColors()) {
        thrust::transform(
                make_tuple_begin(pointcloud.points_, pointcloud.colors_),
                make_tuple_end(pointcloud.points_, pointcloud.colors_),
                make_tuple_iterator(points, colors), func);
    } else {
        // No stored colors: feed a constant dummy color with each point.
        thrust::transform(
                make_tuple_iterator(pointcloud.points_.begin(),
                                    thrust::constant_iterator<Eigen::Vector3f>(
                                            Eigen::Vector3f::Zero())),
                make_tuple_iterator(pointcloud.points_.end(),
                                    thrust::constant_iterator<Eigen::Vector3f>(
                                            Eigen::Vector3f::Zero())),
                make_tuple_iterator(points, colors), func);
    }
    draw_arrays_mode_ = GL_POINTS;
    draw_arrays_size_ = GLsizei(pointcloud.points_.size());
    return true;
}
// One GL vertex per point.
size_t SimpleShaderForPointCloud::GetDataSize(
        const geometry::Geometry &geometry) const {
    const auto &pointcloud =
            static_cast<const geometry::PointCloud &>(geometry);
    return pointcloud.points_.size();
}
// Sets line width and depth state for line-set rendering.
bool SimpleShaderForLineSet::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    if (geometry.GetGeometryType() ==
        geometry::Geometry::GeometryType::LineSet) {
        glLineWidth(GLfloat(option.line_width_));
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GLenum(option.GetGLDepthFunc()));
        return true;
    }
    PrintShaderWarning("Rendering type is not geometry::LineSet.");
    return false;
}
// Two-stage fill: gather each line's endpoint pair into a temporary device
// buffer, then expand to one (vertex, color) pair per endpoint (GL_LINES).
bool SimpleShaderForLineSet::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::LineSet) {
        PrintShaderWarning("Rendering type is not geometry::LineSet.");
        return false;
    }
    const geometry::LineSet<3> &lineset =
            (const geometry::LineSet<3> &)geometry;
    if (lineset.HasLines() == false) {
        PrintShaderWarning("Binding failed with empty geometry::LineSet.");
        return false;
    }
    // Stage 1: (start, end) coordinates for every line.
    utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>>
            line_coords(lineset.lines_.size());
    line_coordinates_functor func_line(
            thrust::raw_pointer_cast(lineset.points_.data()));
    thrust::transform(lineset.lines_.begin(), lineset.lines_.end(),
                      line_coords.begin(), func_line);
    // Stage 2: two output vertices per line.
    copy_lineset_functor func_cp(
            thrust::raw_pointer_cast(line_coords.data()),
            thrust::raw_pointer_cast(lineset.colors_.data()),
            lineset.HasColors());
    thrust::transform(thrust::make_counting_iterator<size_t>(0),
                      thrust::make_counting_iterator(lineset.lines_.size() * 2),
                      make_tuple_iterator(points, colors), func_cp);
    draw_arrays_mode_ = GL_LINES;
    draw_arrays_size_ = GLsizei(lineset.lines_.size() * 2);
    return true;
}
// Two GL vertices per line segment.
size_t SimpleShaderForLineSet::GetDataSize(
        const geometry::Geometry &geometry) const {
    const auto &lineset = static_cast<const geometry::LineSet<3> &>(geometry);
    return lineset.lines_.size() * 2;
}
// Sets point size and depth state for graph-node rendering.
bool SimpleShaderForGraphNode::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    if (geometry.GetGeometryType() == geometry::Geometry::GeometryType::Graph) {
        glPointSize(GLfloat(option.point_size_));
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GLenum(option.GetGLDepthFunc()));
        return true;
    }
    PrintShaderWarning("Rendering type is not geometry::Graph.");
    return false;
}
// Fills the buffers with one (point, color) pair per graph node.
// NOTE(review): the functor is constructed with graph.HasColors() while
// the branch below tests graph.HasNodeColors() -- confirm the two flags
// are meant to differ (the colored branch feeds node_colors_).
bool SimpleShaderForGraphNode::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
        PrintShaderWarning("Rendering type is not geometry::Graph.");
        return false;
    }
    const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry;
    if (graph.HasPoints() == false) {
        PrintShaderWarning("Binding failed with empty graph.");
        return false;
    }
    copy_pointcloud_functor func(graph.HasColors(), option.point_color_option_,
                                 view);
    if (graph.HasNodeColors()) {
        thrust::transform(make_tuple_begin(graph.points_, graph.node_colors_),
                          make_tuple_end(graph.points_, graph.node_colors_),
                          make_tuple_iterator(points, colors), func);
    } else {
        // No node colors: stream a constant dummy color with each point.
        thrust::transform(
                make_tuple_iterator(graph.points_.begin(),
                                    thrust::constant_iterator<Eigen::Vector3f>(
                                            Eigen::Vector3f::Ones())),
                make_tuple_iterator(graph.points_.end(),
                                    thrust::constant_iterator<Eigen::Vector3f>(
                                            Eigen::Vector3f::Ones())),
                make_tuple_iterator(points, colors), func);
    }
    draw_arrays_mode_ = GL_POINTS;
    draw_arrays_size_ = GLsizei(graph.points_.size());
    return true;
}
// One GL vertex per graph node.
size_t SimpleShaderForGraphNode::GetDataSize(
        const geometry::Geometry &geometry) const {
    const auto &graph = static_cast<const geometry::Graph<3> &>(geometry);
    return graph.points_.size();
}
// Sets line width and depth state for graph-edge rendering.
bool SimpleShaderForGraphEdge::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    if (geometry.GetGeometryType() == geometry::Geometry::GeometryType::Graph) {
        glLineWidth(GLfloat(option.line_width_));
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GLenum(option.GetGLDepthFunc()));
        return true;
    }
    PrintShaderWarning("Rendering type is not geometry::Graph.");
    return false;
}
// Same two-stage fill as the LineSet shader, applied to the graph's edge
// list: gather endpoint coordinates, then expand to GL_LINES vertices.
bool SimpleShaderForGraphEdge::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
        PrintShaderWarning("Rendering type is not geometry::Graph.");
        return false;
    }
    const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry;
    if (graph.HasLines() == false) {
        PrintShaderWarning("Binding failed with empty geometry::Graph.");
        return false;
    }
    utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>>
            line_coords(graph.lines_.size());
    line_coordinates_functor func_line(
            thrust::raw_pointer_cast(graph.points_.data()));
    thrust::transform(graph.lines_.begin(), graph.lines_.end(),
                      line_coords.begin(), func_line);
    copy_lineset_functor func_cp(thrust::raw_pointer_cast(line_coords.data()),
                                 thrust::raw_pointer_cast(graph.colors_.data()),
                                 graph.HasColors());
    thrust::transform(thrust::make_counting_iterator<size_t>(0),
                      thrust::make_counting_iterator(graph.lines_.size() * 2),
                      make_tuple_iterator(points, colors), func_cp);
    draw_arrays_mode_ = GL_LINES;
    draw_arrays_size_ = GLsizei(graph.lines_.size() * 2);
    return true;
}
// Two GL vertices per graph edge.
size_t SimpleShaderForGraphEdge::GetDataSize(
        const geometry::Geometry &geometry) const {
    const auto &graph = static_cast<const geometry::Graph<3> &>(geometry);
    return graph.lines_.size() * 2;
}
// Sets line width and depth state for bounding-box wireframe rendering.
bool SimpleShaderForAxisAlignedBoundingBox::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    if (geometry.GetGeometryType() ==
        geometry::Geometry::GeometryType::AxisAlignedBoundingBox) {
        glLineWidth(GLfloat(option.line_width_));
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GLenum(option.GetGLDepthFunc()));
        return true;
    }
    PrintShaderWarning(
            "Rendering type is not geometry::AxisAlignedBoundingBox.");
    return false;
}
// Converts the box into a temporary LineSet, then runs the standard
// two-stage line expansion (endpoint gather + GL_LINES expand).
bool SimpleShaderForAxisAlignedBoundingBox::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::AxisAlignedBoundingBox) {
        PrintShaderWarning(
                "Rendering type is not geometry::AxisAlignedBoundingBox.");
        return false;
    }
    auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox(
            (const geometry::AxisAlignedBoundingBox &)geometry);
    utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>>
            line_coords(lineset->lines_.size());
    line_coordinates_functor func_line(
            thrust::raw_pointer_cast(lineset->points_.data()));
    thrust::transform(lineset->lines_.begin(), lineset->lines_.end(),
                      line_coords.begin(), func_line);
    copy_lineset_functor func_cp(
            thrust::raw_pointer_cast(line_coords.data()),
            thrust::raw_pointer_cast(lineset->colors_.data()),
            lineset->HasColors());
    thrust::transform(
            thrust::make_counting_iterator<size_t>(0),
            thrust::make_counting_iterator(lineset->lines_.size() * 2),
            make_tuple_iterator(points, colors), func_cp);
    draw_arrays_mode_ = GL_LINES;
    draw_arrays_size_ = GLsizei(lineset->lines_.size() * 2);
    return true;
}
// Number of GL vertices for the box wireframe.
// NOTE(review): this rebuilds the full LineSet just to read its line
// count; the count is a fixed number of box edges times two endpoints.
size_t SimpleShaderForAxisAlignedBoundingBox::GetDataSize(
        const geometry::Geometry &geometry) const {
    auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox(
            (const geometry::AxisAlignedBoundingBox &)geometry);
    return lineset->lines_.size() * 2;
}
// Configures GL state for mesh rendering: optional back-face culling,
// depth testing, filled polygons, and a polygon offset when a wireframe
// overlay will be drawn on top of the filled faces.
bool SimpleShaderForTriangleMesh::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::TriangleMesh) {
        PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
        return false;
    }
    if (option.mesh_show_back_face_) {
        glDisable(GL_CULL_FACE);
    } else {
        glEnable(GL_CULL_FACE);
    }
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GLenum(option.GetGLDepthFunc()));
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
    if (option.mesh_show_wireframe_) {
        // Push filled faces slightly back so the wireframe pass stays visible.
        glEnable(GL_POLYGON_OFFSET_FILL);
        glPolygonOffset(1.0, 1.0);
    } else {
        glDisable(GL_POLYGON_OFFSET_FILL);
    }
    return true;
}
// Expands the triangle index buffer into one (position, color) pair per
// triangle corner on the device via copy_trianglemesh_functor.
bool SimpleShaderForTriangleMesh::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::TriangleMesh) {
        PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
        return false;
    }
    const geometry::TriangleMesh &mesh =
            (const geometry::TriangleMesh &)geometry;
    if (mesh.HasTriangles() == false) {
        PrintShaderWarning("Binding failed with empty triangle mesh.");
        return false;
    }
    copy_trianglemesh_functor func(
            thrust::raw_pointer_cast(mesh.vertices_.data()),
            // Reinterpret the Vector3i triangle list as a flat int buffer.
            (int *)(thrust::raw_pointer_cast(mesh.triangles_.data())),
            thrust::raw_pointer_cast(mesh.vertex_colors_.data()),
            mesh.HasVertexColors(), option.mesh_color_option_,
            option.default_mesh_color_, view);
    thrust::transform(
            thrust::make_counting_iterator<size_t>(0),
            thrust::make_counting_iterator(mesh.triangles_.size() * 3),
            make_tuple_iterator(points, colors), func);
    draw_arrays_mode_ = GL_TRIANGLES;
    draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3);
    return true;
}
// Three GL vertices per triangle.
size_t SimpleShaderForTriangleMesh::GetDataSize(
        const geometry::Geometry &geometry) const {
    const auto &mesh = static_cast<const geometry::TriangleMesh &>(geometry);
    return mesh.triangles_.size() * 3;
}
// Disables culling and sets depth state for voxel wireframe rendering.
bool SimpleShaderForVoxelGridLine::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    if (geometry.GetGeometryType() ==
        geometry::Geometry::GeometryType::VoxelGrid) {
        glDisable(GL_CULL_FACE);
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GLenum(option.GetGLDepthFunc()));
        return true;
    }
    PrintShaderWarning("Rendering type is not geometry::VoxelGrid.");
    return false;
}
// Builds the voxel wireframe on the device: compute the 8 corner vertices
// of every voxel, then expand them into 12 edges x 2 endpoints per voxel.
bool SimpleShaderForVoxelGridLine::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::VoxelGrid) {
        PrintShaderWarning("Rendering type is not geometry::VoxelGrid.");
        return false;
    }
    const geometry::VoxelGrid &voxel_grid =
            (const geometry::VoxelGrid &)geometry;
    if (voxel_grid.HasVoxels() == false) {
        PrintShaderWarning("Binding failed with empty voxel grid.");
        return false;
    }
    // 8 corner vertices per voxel.
    utility::device_vector<Eigen::Vector3f> vertices(
            voxel_grid.voxels_values_.size() * 8);
    // irange cycles corner ids 0..7 once per voxel; vrange repeats each
    // voxel's grid index 8 times, pairing every voxel with all 8 corners.
    thrust::tiled_range<
            thrust::counting_iterator<size_t>>
            irange(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(8),
                   voxel_grid.voxels_values_.size());
    auto gfunc = geometry::get_grid_index_functor<geometry::Voxel, Eigen::Vector3i>();
    auto begin = thrust::make_transform_iterator(voxel_grid.voxels_values_.begin(), gfunc);
    thrust::repeated_range<decltype(begin)>
            vrange(begin, thrust::make_transform_iterator(voxel_grid.voxels_values_.end(), gfunc), 8);
    geometry::compute_voxel_vertices_functor<Eigen::Vector3i> func1(voxel_grid.origin_, voxel_grid.voxel_size_);
    thrust::transform(make_tuple_begin(irange, vrange), make_tuple_end(irange, vrange),
                      vertices.begin(), func1);
    // 12 edges x 2 endpoints per voxel.
    size_t n_out = voxel_grid.voxels_values_.size() * 12 * 2;
    copy_voxelgrid_line_functor func2(
            thrust::raw_pointer_cast(vertices.data()),
            thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()),
            voxel_grid.HasColors(), option.mesh_color_option_,
            option.default_mesh_color_, view);
    thrust::transform(thrust::make_counting_iterator<size_t>(0),
                      thrust::make_counting_iterator(n_out),
                      make_tuple_iterator(points, colors), func2);
    draw_arrays_mode_ = GL_LINES;
    draw_arrays_size_ = GLsizei(n_out);
    return true;
}
// 12 cuboid edges per voxel, two endpoints each.
size_t SimpleShaderForVoxelGridLine::GetDataSize(
        const geometry::Geometry &geometry) const {
    const auto &voxel_grid = static_cast<const geometry::VoxelGrid &>(geometry);
    return voxel_grid.voxels_values_.size() * 12 * 2;
}
// Sets point, depth and alpha-blend state for distance-field rendering;
// point transparency encodes the stored distance value.
bool SimpleShaderForDistanceTransform::PrepareRendering(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view) {
    if (geometry.GetGeometryType() ==
        geometry::Geometry::GeometryType::DistanceTransform) {
        glPointSize(GLfloat(option.point_size_));
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GLenum(option.GetGLDepthFunc()));
        glEnable(GL_BLEND);
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        return true;
    }
    PrintShaderWarning("Rendering type is not geometry::DistanceTransform.");
    return false;
}
// Emits one semi-transparent point per voxel of the distance field and
// sorts by descending alpha so blended rendering draws the most opaque
// points first. The fade range is 10% of the grid extent.
bool SimpleShaderForDistanceTransform::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<Eigen::Vector3f> &points,
        thrust::device_ptr<Eigen::Vector4f> &colors) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::DistanceTransform) {
        PrintShaderWarning("Rendering type is not geometry::DistanceTransform.");
        return false;
    }
    const geometry::DistanceTransform &dist_trans =
            (const geometry::DistanceTransform &)geometry;
    if (dist_trans.IsEmpty()) {
        PrintShaderWarning("Binding failed with empty distance transform.");
        return false;
    }
    size_t n_out = dist_trans.voxels_.size();
    // distance_max = voxel_size * resolution * 0.1 (10% of grid extent).
    copy_distance_voxel_functor
            func(dist_trans.voxel_size_, dist_trans.resolution_, dist_trans.origin_,
                 dist_trans.voxel_size_ * dist_trans.resolution_ * 0.1);
    thrust::transform(make_tuple_iterator(thrust::make_counting_iterator<size_t>(0), dist_trans.voxels_.begin()),
                      make_tuple_iterator(thrust::make_counting_iterator(n_out), dist_trans.voxels_.end()),
                      make_tuple_iterator(points, colors), func);
    // In-place sort of the interleaved (point, color) buffers on stream 0.
    auto tp_begin = make_tuple_iterator(points, colors);
    thrust::sort(utility::exec_policy(0)->on(0),
                 tp_begin, tp_begin + n_out, alpha_greater_functor());
    draw_arrays_mode_ = GL_POINTS;
    draw_arrays_size_ = GLsizei(n_out);
    return true;
}
// Full grid: resolution^3 voxels, one point per voxel.
size_t SimpleShaderForDistanceTransform::GetDataSize(
        const geometry::Geometry &geometry) const {
    const auto &dist_trans =
            static_cast<const geometry::DistanceTransform &>(geometry);
    int res = dist_trans.resolution_;
    return res * res * res;
}
|
6f26ce25fbcdb7578b07b670a57060cc8800792c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Accumulates received boundary contributions (presumably MPI halo-exchange
// buffers -- confirm against the caller) into the global acoustic potential.
// One thread per interface-point slot; atomicAdd is required because several
// interface points can map to the same global DOF.
__global__ void assemble_boundary_potential_on_device(float * d_potential_dot_dot_acoustic, const float * d_send_potential_dot_dot_buffer, const int num_interfaces, const int max_nibool_interfaces, const int * d_nibool_interfaces, const int * d_ibool_interfaces){
    int id;
    int iglob;
    int iloc;
    int iinterface;
    // Flat thread index over a 2D grid of 2D blocks.
    id = threadIdx.x + (blockIdx.x) * (blockDim.x) + ((gridDim.x) * (blockDim.x)) * (threadIdx.y + (blockIdx.y) * (blockDim.y));
    for (iinterface = 0; iinterface <= num_interfaces - (1); iinterface += 1) {
        if (id < d_nibool_interfaces[iinterface]) {
            iloc = id + (max_nibool_interfaces) * (iinterface);
            iglob = d_ibool_interfaces[iloc] - (1);  // 1-based -> 0-based index
            atomicAdd(d_potential_dot_dot_acoustic + iglob, d_send_potential_dot_dot_buffer[iloc]);
        }
    }
} | 6f26ce25fbcdb7578b07b670a57060cc8800792c.cu | #include "includes.h"
// Accumulates received boundary contributions into the global acoustic
// potential array. One thread per interface-point slot; the same thread id
// is reused for every interface, and atomicAdd handles points that map to
// the same global DOF.
__global__ void assemble_boundary_potential_on_device(float * d_potential_dot_dot_acoustic, const float * d_send_potential_dot_dot_buffer, const int num_interfaces, const int max_nibool_interfaces, const int * d_nibool_interfaces, const int * d_ibool_interfaces){
    // Flat thread index over a 2D grid of 2D blocks.
    const int tid = threadIdx.x + blockIdx.x * blockDim.x +
                    gridDim.x * blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y);
    for (int iface = 0; iface < num_interfaces; ++iface) {
        if (tid < d_nibool_interfaces[iface]) {
            const int buf_idx = tid + max_nibool_interfaces * iface;
            const int iglob = d_ibool_interfaces[buf_idx] - 1;  // 1-based -> 0-based
            atomicAdd(d_potential_dot_dot_acoustic + iglob,
                      d_send_potential_dot_dot_buffer[buf_idx]);
        }
    }
}
84235191128a8b2688385d5e9cf3376ea8563ee3.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathCompareT.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathCompareT.cu"
#include "../THCGenerateHalfType.h"
| 84235191128a8b2688385d5e9cf3376ea8563ee3.cu | #include "../THCTensorMathCompareT.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMathCompareT.cu"
#include "../THCGenerateHalfType.h"
|
fa7b53a7fb23e00b2e2d110224c8b19dce118042.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpuAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps matrix sizes x block shapes and
// reports the wall time of 1000 launches of gpuAdd for each configuration.
// argv[1] selects how many matrix sizes (rows of matrices_) to test.
int main(int argc, char **argv) {
    hipSetDevice(0);
    // BUGFIX: guard against a missing argument; strtol(NULL, ...) is UB.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int d_a = 1;
            int d_b = 1;
            int *d_c = NULL;
            // Harness convention: allocation size is XSIZE*YSIZE bytes.
            hipMalloc(&d_c, XSIZE * YSIZE);
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force lazy context creation before timing
            hipLaunchKernelGGL((gpuAdd), dim3(gridBlock), dim3(threadBlock), 0, 0, d_a, d_b, d_c);
            hipDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((gpuAdd), dim3(gridBlock), dim3(threadBlock), 0, 0, d_a, d_b, d_c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((gpuAdd), dim3(gridBlock), dim3(threadBlock), 0, 0, d_a, d_b, d_c);
            }
            // BUGFIX: wait for the queued kernels so the measured interval
            // covers execution, not just launch overhead.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUGFIX: free the per-configuration buffer (was leaked once
            // per loop iteration).
            hipFree(d_c);
        }
}} | fa7b53a7fb23e00b2e2d110224c8b19dce118042.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpuAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps matrix sizes x block shapes and
// reports the wall time of 1000 launches of gpuAdd for each configuration.
// argv[1] selects how many matrix sizes (rows of matrices_) to test.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // BUGFIX: guard against a missing argument; strtol(NULL, ...) is UB.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int d_a = 1;
            int d_b = 1;
            int *d_c = NULL;
            // Harness convention: allocation size is XSIZE*YSIZE bytes.
            cudaMalloc(&d_c, XSIZE * YSIZE);
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context creation before timing
            gpuAdd<<<gridBlock, threadBlock>>>(d_a, d_b, d_c);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                gpuAdd<<<gridBlock, threadBlock>>>(d_a, d_b, d_c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                gpuAdd<<<gridBlock, threadBlock>>>(d_a, d_b, d_c);
            }
            // BUGFIX: wait for the queued kernels so the measured interval
            // covers execution, not just launch overhead.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUGFIX: free the per-configuration buffer (was leaked once
            // per loop iteration).
            cudaFree(d_c);
        }
    }
}
f06a08ec86366817c8322674e234481e761b3668.hip | // !!! This is a file automatically generated by hipify!!!
#include <limits>
#include "custom_cuda_layers.h"
#include <hip/hip_runtime_api.h>
#include <cstdio>
#include <cstdlib>
#include <ctime>
namespace cg = cooperative_groups;
#define INPUT_TILE 1
#define INPUT_TILE1 1
// Input tile used in the gemm kernel v2
#define INPUT_TILE2 10
#define MAX_REG_SIZE 20
#define WARP_SIZE 32
#define MAX_WARP_NUM 32
#define MAX_BLOCK_SUM 8
#define loop_unroll 4
#define loop_unroll_bits 2
#define inner_loop_unroll 4
#define inner_loop_unroll_bits 2
#define INT8WIDTH 2
#define MAX_QUANTIZE_GROUPING 1024
#define ACC_HALF true
// Approximate GELU activation (tanh formulation):
//   0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
// BUGFIX: all literals carry the 'f' suffix; the original used unsuffixed
// double constants, silently promoting the whole expression to double
// precision in a float device function.
inline __device__ float gelu(const float x)
{
    float y = 0.5f * x * (1.0f + tanhf(0.7978845608028654f * x * (1.0f + 0.044715f * x * x)));
    return y;
}
__global__ void input_tiled_gemm_kernel_v2(__half* output,
const __half* vals,
const int8_t* weight,
const __half* bias,
unsigned hidden_dim,
unsigned block_reduce,
unsigned input_size,
unsigned output_size,
unsigned outputBlocks,
unsigned blockStride,
float* qscale,
unsigned groups,
__half* block_sums,
unsigned merge_count = 1,
unsigned quantization_stride = 1,
bool add_gelu = false)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
__half2* output_cast =
reinterpret_cast<__half2*>(((gridDim.x == outputBlocks) ? output : block_sums));
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
const int16_t* weight_cast = reinterpret_cast<const int16_t*>(weight);
output_cast += ((blockIdx.x / outputBlocks) * (output_size));
weight_cast += ((blockIdx.x / outputBlocks) * blockStride);
vals_cast += (unsigned)(blockIdx.x / outputBlocks) * (hidden_dim >> 1);
// reading all the quantization scale into a small shared buffer
__shared__ __half shared_quantize_scale[MAX_QUANTIZE_GROUPING];
int merge_hidden = hidden_dim >> merge_count;
if (threadIdx.x < (groups << merge_count))
shared_quantize_scale[threadIdx.x] = __float2half(qscale[threadIdx.x]);
__syncthreads();
for (int j = 0; j < input_size; j += (INPUT_TILE2)) {
__half2 sum[INPUT_TILE2];
#pragma unroll
for (int t = 0; t < INPUT_TILE2; t++) sum[t] = __float2half2_rn(0.f);
{
int wid = gid << 2;
weight_cast += (wid * output_size + (blockIdx.x % outputBlocks) * WARP_SIZE + lane);
while (wid < hidden_dim) {
// updating the quantization scale
__half2 qscale_data;
{
auto tmp = shared_quantize_scale[0];
qscale_data = __halves2half2(tmp, tmp);
if (groups > 1) {
unsigned index = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
unsigned merge_index = wid / merge_hidden;
index = (wid - merge_index * merge_hidden) + (index << 1) * merge_hidden;
qscale_data = __halves2half2(
shared_quantize_scale[((index / quantization_stride) << merge_count) +
merge_index],
shared_quantize_scale[(((index + merge_hidden) / quantization_stride)
<< merge_count) +
merge_index]);
}
}
// Read the input
__shared__ __half2 vals_h[(loop_unroll >> 1) * INPUT_TILE2 * MAX_WARP_NUM];
{
// we read (loop_unroll >> 2) half-2 values per lane, and for 2 times of the
// INPUT_TILE this makes more threads engaged in reading data from shared memory
// into registers!
if (lane < (INPUT_TILE2 << 1)) {
if (((lane >> 1) + j) < input_size) {
// here, we consider loop_unroll is always higher that 4!
unsigned int inp_id = ((lane % 2) << (loop_unroll_bits - 2));
unsigned int offset =
(j + (lane >> 1)) * (block_reduce * (hidden_dim >> 1)) + inp_id;
#pragma unroll
for (int li = 0; li < (loop_unroll >> 2); li++) {
vals_h[li + inp_id + (((lane >> 1) << (loop_unroll_bits - 1))) +
(gid << (loop_unroll_bits - 1)) * INPUT_TILE2] =
vals_cast[offset + (wid >> 1) + li];
}
}
}
g.sync();
}
int col_index = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
if (col_index < output_size) {
__half2 weight_h[loop_unroll];
{
int16_t weight_q[loop_unroll];
#pragma unroll
for (int k = 0; k < loop_unroll; k++)
if ((k + wid) < hidden_dim) weight_q[k] = weight_cast[k * output_size];
#pragma unroll
for (int k = 0; k < loop_unroll; k++) {
int8_t* weight_8 = reinterpret_cast<int8_t*>(&weight_q[k]);
weight_h[k] = __halves2half2(__float2half((float)weight_8[0]),
__float2half((float)weight_8[1])) *
qscale_data;
}
}
// matrix-matrix multiply
#pragma unroll
for (int t = 0; t < INPUT_TILE2; t++) {
if ((t + j) < input_size) {
#pragma unroll
for (int li = 0; li < loop_unroll; li++) {
__half* val_h = reinterpret_cast<__half*>(
&vals_h[(t << (loop_unroll_bits - 1)) + (li >> 1) +
(gid << (loop_unroll_bits - 1)) * INPUT_TILE2]);
auto mul =
weight_h[li] * __halves2half2(val_h[li % 2], val_h[li % 2]);
if (ACC_HALF)
sum[t] += mul;
else {
float2 mul_f = __half22float2(mul);
float2 sum_f = __half22float2(sum[t]);
sum_f.x += mul_f.x;
sum_f.y += mul_f.y;
sum[t] = __float22half2_rn(sum_f);
}
}
}
}
}
wid += (warp_num << loop_unroll_bits);
weight_cast += (output_size * (warp_num << loop_unroll_bits));
}
}
const __half2* bias_cast;
if (bias) bias_cast = reinterpret_cast<const __half2*>(bias);
__shared__ __half2 partial_result[2 * MAX_WARP_NUM * (WARP_SIZE + 2)];
for (int t = 0; t < INPUT_TILE2; t += 2) {
if ((t + j) < input_size) {
partial_result[(gid << 1) * (WARP_SIZE + 2) + (lane << 1)] = sum[t];
partial_result[(gid << 1) * (WARP_SIZE + 2) + (lane << 1) + 1] = sum[t + 1];
b.sync();
if (ACC_HALF) {
sum[t] = partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1)];
sum[t + 1] = partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1) + 1];
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
float temp[2];
float* sum_f[2];
__half2* sum_h[2];
sum_f[0] = reinterpret_cast<float*>(&sum[t]);
sum_f[1] = reinterpret_cast<float*>(&sum[t + 1]);
temp[0] = g.shfl_xor(*sum_f[0], i);
temp[1] = g.shfl_xor(*sum_f[1], i);
sum_h[0] = reinterpret_cast<__half2*>(&temp[0]);
sum_h[1] = reinterpret_cast<__half2*>(&temp[1]);
sum[t] += *sum_h[0];
sum[t + 1] += *sum_h[1];
}
if (lane == 0) {
partial_result[(gid << 1)] = sum[t];
partial_result[(gid << 1) + 1] = sum[t + 1];
}
} else {
float2 sum_f[2];
sum_f[0] =
__half22float2(partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1)]);
sum_f[1] = __half22float2(
partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1) + 1]);
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_f[0].x += g.shfl_xor(sum_f[0].x, i);
sum_f[0].y += g.shfl_xor(sum_f[0].y, i);
sum_f[1].x += g.shfl_xor(sum_f[1].x, i);
sum_f[1].y += g.shfl_xor(sum_f[1].y, i);
}
if (lane == 0) {
partial_result[(gid << 1)] = __float22half2_rn(sum_f[0]);
partial_result[(gid << 1) + 1] = __float22half2_rn(sum_f[1]);
}
}
b.sync();
if (gid == (t >> 1)) {
sum[0] = partial_result[(lane << 1)];
sum[1] = partial_result[(lane << 1) + 1];
}
}
}
if ((gid << 1) < INPUT_TILE2 && ((gid << 1) + j) < input_size) {
int col_index = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
if (col_index < output_size) {
if (bias && blockIdx.x < outputBlocks) {
__half2 bias_h = bias_cast[col_index];
float2 bias_f = __half22float2(bias_h);
float2 sum_f[2];
sum_f[0] = __half22float2(sum[0]);
sum_f[1] = __half22float2(sum[1]);
sum_f[0].x += bias_f.x;
sum_f[0].y += bias_f.y;
sum_f[1].x += bias_f.x;
sum_f[1].y += bias_f.y;
if (add_gelu && gridDim.x == outputBlocks) {
sum_f[0].x = gelu(sum_f[0].x);
sum_f[1].x = gelu(sum_f[1].x);
sum_f[0].y = gelu(sum_f[0].y);
sum_f[1].y = gelu(sum_f[1].y);
}
sum[0] = __float22half2_rn(sum_f[0]);
sum[1] = __float22half2_rn(sum_f[1]);
}
output_cast[col_index + (j + (gid << 1)) * (block_reduce * output_size)] = (sum[0]);
if ((input_size - ((gid << 1) + j)) > 1)
output_cast[col_index + (j + (gid << 1) + 1) * (block_reduce * output_size)] =
(sum[1]);
}
}
weight_cast = reinterpret_cast<const int16_t*>(weight);
weight_cast += ((blockIdx.x / outputBlocks) * blockStride);
}
}
// FP32 tiled GEMM: output = vals * weight (+ bias, + optional GeLU).
// output_size is in float2 units (half of the real column count).
// Grid layout: gridDim.x == outputBlocks * block_reduce. Blocks sharing
// (blockIdx.x % outputBlocks) own the same WARP_SIZE-wide column tile, while
// (blockIdx.x / outputBlocks) selects a slice of the reduction (hidden)
// dimension. When block_reduce > 1, per-slice partials are written to
// block_sums and later combined by block_reduce_kernel; otherwise
// (gridDim.x == outputBlocks) results go straight to output, with bias/GeLU
// applied here.
__global__ void input_tiled_gemm_kernel_v2(float* output,
                                           const float* vals,
                                           const float* weight,
                                           const float* bias,
                                           float* block_sums,
                                           int hidden_dim,
                                           int block_reduce,
                                           int input_size,
                                           int output_size,
                                           int outputBlocks,
                                           int blockStride,
                                           bool add_gelu = false)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    unsigned int gid = threadIdx.x >> 5;    // warp id within the block
    unsigned int lane = threadIdx.x & 0x1f; // lane id within the warp
    int warp_num = blockDim.x >> 5;         // warps per block
    // Route results either to the final output or to the per-slice scratch.
    float2* output_cast =
        reinterpret_cast<float2*>(((gridDim.x == outputBlocks) ? output : block_sums));
    const float2* vals_cast = reinterpret_cast<const float2*>(vals);
    const float2* weight_cast = reinterpret_cast<const float2*>(weight);
    output_cast += (unsigned)(blockIdx.x / outputBlocks) * (output_size);
    int hidden_half = hidden_dim >> 1;
    // Advance to this block's slice of the reduction dimension.
    weight_cast += ((unsigned)(blockIdx.x / outputBlocks) * blockStride);
    vals_cast += (unsigned)(blockIdx.x / outputBlocks) * hidden_half;
    for (int j = 0; j < input_size; j += (INPUT_TILE2)) {
        float2 sum[INPUT_TILE2];
#pragma unroll
        for (int t = 0; t < (INPUT_TILE2); t++) {
            sum[t].x = 0;
            sum[t].y = 0;
        }
        {
            // Each warp starts at a distinct pair of hidden rows and strides
            // by 2 * warp_num so the block covers this hidden_dim slice.
            int wid = gid << 1;
            int offset = wid * output_size;
            while (wid < hidden_dim) {
                float2 val_data[INPUT_TILE2];
                {
                    for (int t = 0; t < INPUT_TILE2; t++) {
                        if ((t + j) < input_size) {
                            val_data[t] =
                                vals_cast[(j + t) * (hidden_half * block_reduce) + (wid >> 1)];
                        }
                    }
                }
                int row = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                auto offset1 = offset + row;
                while (row < output_size) {
                    float2 weight[2];
                    weight[0] = weight_cast[offset1];
                    weight[1] = weight_cast[output_size + offset1];
                    for (int t = 0; t < INPUT_TILE2; t++) {
                        if ((t + j) < input_size) {
                            float2 mul[2];
                            mul[0].x = val_data[t].x * weight[0].x;
                            mul[0].y = val_data[t].x * weight[0].y;
                            mul[1].x = val_data[t].y * weight[1].x;
                            mul[1].y = val_data[t].y * weight[1].y;
                            sum[t].x += mul[0].x + mul[1].x;
                            sum[t].y += mul[0].y + mul[1].y;
                        }
                    }
                    row += (gridDim.x * WARP_SIZE);
                    offset1 += (gridDim.x * WARP_SIZE);
                }
                wid += warp_num * 2;
                offset += (output_size * warp_num * 2);
            }
        }
        {
            const float2* bias_cast;
            if (bias) bias_cast = reinterpret_cast<const float2*>(bias);
            // +1 column padding avoids shared-memory bank conflicts during the
            // transpose below.
            __shared__ float2 partial_result[MAX_WARP_NUM * (WARP_SIZE + 1)];
            for (int t = 0; t < (INPUT_TILE2); t++) {
                if ((t + j) < input_size) {
                    float2 sum_g = sum[t];
                    // Transpose the per-warp partials through shared memory so
                    // a single warp can finish the reduction with shuffles.
                    partial_result[gid * (WARP_SIZE + 1) + lane] = sum_g;
                    __syncthreads();
                    sum_g = partial_result[lane * (WARP_SIZE + 1) + gid];
                    __syncthreads();
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        sum_g.x += g.shfl_xor(sum_g.x, i);
                        sum_g.y += g.shfl_xor(sum_g.y, i);
                    }
                    if (lane == 0) { partial_result[gid] = sum_g; }
                    __syncthreads();
                    sum[t] = partial_result[lane];
                }
            }
            // Warp `gid` now owns output row (j + gid) of this input tile.
            if (gid < INPUT_TILE2 && ((gid + j) < input_size)) {
                int col = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                if (col < output_size) {
                    // Bias (and GeLU) are only applied when writing the final
                    // output, never when emitting partial block_sums.
                    if (bias && blockIdx.x < outputBlocks) {
                        float2 bias_f = bias_cast[col];
                        sum[gid].x += bias_f.x;
                        sum[gid].y += bias_f.y;
                        if (add_gelu && gridDim.x == outputBlocks) {
                            sum[gid].x = gelu(sum[gid].x);
                            sum[gid].y = gelu(sum[gid].y);
                        }
                    }
                    output_cast[col + (j + gid) * (output_size * block_reduce)] = sum[gid];
                }
            }
        }
    }
}
// FP16 tiled GEMM: output = vals * weight (+ bias, + optional GeLU).
// output_size is in __half2 units (half of the real column count).
// Requires SM70+ (guarded by __CUDA_ARCH__). Grid layout matches the FP32
// variant: gridDim.x == outputBlocks * block_reduce, with
// (blockIdx.x / outputBlocks) selecting a slice of the reduction dimension.
// Partials go to block_sums when block_reduce > 1 and are combined later by
// block_reduce_kernel.
//
// Fix: the bias epilogue previously stored sum_f[0] into the second row slot
// (sum[(gid << 1) + 1]), so every odd output row of a tile received the even
// row's biased value; it now stores sum_f[1].
__global__ void input_tiled_gemm_kernel_v2(__half* output,
                                           const __half* vals,
                                           const __half* weight,
                                           const __half* bias,
                                           __half* block_sums,
                                           unsigned int hidden_dim,
                                           unsigned int block_reduce,
                                           unsigned int input_size,
                                           unsigned int output_size,
                                           unsigned int outputBlocks,
                                           unsigned int blockStride,
                                           bool add_gelu = false)
{
#if __CUDA_ARCH__ >= 700
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    unsigned int gid = threadIdx.x >> 5;    // warp id within the block
    unsigned int lane = threadIdx.x & 0x1f; // lane id within the warp
    int warp_num = blockDim.x >> 5;         // warps per block
    // Route results either to the final output or to the per-slice scratch.
    __half2* output_cast =
        reinterpret_cast<__half2*>(((gridDim.x == outputBlocks) ? output : block_sums));
    const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
    const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
    output_cast += (unsigned)(blockIdx.x / outputBlocks) * (output_size);
    int hidden_half = hidden_dim >> 1;
    // Advance to this block's slice of the reduction dimension.
    weight_cast += ((unsigned)(blockIdx.x / outputBlocks) * blockStride);
    vals_cast += (unsigned)(blockIdx.x / outputBlocks) * hidden_half;
    for (int j = 0; j < input_size; j += (INPUT_TILE2)) {
        __half2 sum[INPUT_TILE2];
#pragma unroll
        for (int t = 0; t < INPUT_TILE2; t++) { sum[t] = __float2half2_rn(0.f); }
        {
            int wid = gid << loop_unroll_bits;
            weight_cast += wid * output_size + (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
            while (wid < hidden_dim) {
                // Stage the input activations for this tile in shared memory.
                __shared__ __half2 vals_h[(loop_unroll >> 1) * INPUT_TILE2 * MAX_WARP_NUM];
                {
                    // We read (loop_unroll >> 2) half2 values per lane, spread
                    // over 2x INPUT_TILE2 lanes, so more threads take part in
                    // the global->shared staging.
                    if (lane < (INPUT_TILE2 << 1)) {
                        if (((lane >> 1) + j) < input_size) {
                            // loop_unroll is assumed to be at least 4 here.
                            unsigned int inp_id = ((lane % 2) << (loop_unroll_bits - 2));
                            unsigned int offset =
                                (j + (lane >> 1)) * (block_reduce * (hidden_dim >> 1)) + inp_id;
#pragma unroll
                            for (int li = 0; li < (loop_unroll >> 2); li++) {
                                vals_h[li + inp_id + (((lane >> 1) << (loop_unroll_bits - 1))) +
                                       (gid << (loop_unroll_bits - 1)) * INPUT_TILE2] =
                                    vals_cast[offset + (wid >> 1) + li];
                            }
                        }
                    }
                    g.sync();
                }
                int col = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                if (col < output_size) {
                    __half2 weight_h[loop_unroll];
#pragma unroll
                    for (int k = 0; k < loop_unroll; k++)
                        weight_h[k] = weight_cast[output_size * k];
#pragma unroll
                    for (int t = 0; t < INPUT_TILE2; t++) {
                        float2 sum_f;
                        // Accumulate in FP32 unless ACC_HALF is enabled.
                        if (!ACC_HALF) sum_f = __half22float2(sum[t]);
#pragma unroll
                        for (int li = 0; li < (loop_unroll >> 1); li++) {
                            __half* inp_data = reinterpret_cast<__half*>(
                                &vals_h[(t << (loop_unroll_bits - 1)) + li +
                                        (gid << (loop_unroll_bits - 1)) * INPUT_TILE2]);
#pragma unroll
                            for (int k = 0; k < 2; k++) {
                                if (ACC_HALF)
                                    sum[t] += __halves2half2(inp_data[k], inp_data[k]) *
                                              weight_h[(li << 1) + k];
                                else {
                                    float2 weight_f =
                                        __half22float2(__halves2half2(inp_data[k], inp_data[k]) *
                                                       weight_h[(li << 1) + k]);
                                    sum_f.x += weight_f.x;
                                    sum_f.y += weight_f.y;
                                }
                            }
                        }
                        if (!ACC_HALF) sum[t] = __float22half2_rn(sum_f);
                    }
                }
                wid += warp_num << loop_unroll_bits;
                weight_cast += (output_size * (warp_num << loop_unroll_bits));
            }
        }
        {
            const __half2* bias_cast;
            if (bias) bias_cast = reinterpret_cast<const __half2*>(bias);
            // Two rows are reduced per pass; +2 padding mitigates bank
            // conflicts during the transpose.
            __shared__ __half2 partial_result[2 * MAX_WARP_NUM * (WARP_SIZE + 2)];
            for (int t = 0; t < INPUT_TILE2; t += 2) {
                if ((t + j) < input_size) {
                    partial_result[(gid << 1) * (WARP_SIZE + 2) + (lane << 1)] = sum[t];
                    partial_result[(gid << 1) * (WARP_SIZE + 2) + (lane << 1) + 1] = sum[t + 1];
                    b.sync();
                    float2 sum_f[2];
                    sum_f[0] =
                        __half22float2(partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1)]);
                    sum_f[1] = __half22float2(
                        partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1) + 1]);
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        sum_f[0].x += g.shfl_xor(sum_f[0].x, i);
                        sum_f[0].y += g.shfl_xor(sum_f[0].y, i);
                        sum_f[1].x += g.shfl_xor(sum_f[1].x, i);
                        sum_f[1].y += g.shfl_xor(sum_f[1].y, i);
                    }
                    if (lane == 0) {
                        partial_result[(gid << 1)] = __float22half2_rn(sum_f[0]);
                        partial_result[(gid << 1) + 1] = __float22half2_rn(sum_f[1]);
                    }
                    b.sync();
                    if (gid == (t >> 1)) {
                        sum[t] = partial_result[(lane << 1)];
                        sum[t + 1] = partial_result[(lane << 1) + 1];
                    }
                }
            }
            // Warp `gid` owns output rows (j + 2*gid) and (j + 2*gid + 1).
            if ((gid << 1) < INPUT_TILE2 && ((gid << 1) + j) < input_size) {
                int col = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                if (col < output_size) {
                    if (bias && blockIdx.x < outputBlocks) {
                        __half2 bias_h = bias_cast[col];
                        float2 bias_f = __half22float2(bias_h);
                        float2 sum_f[2];
                        sum_f[0] = __half22float2(sum[(gid << 1)]);
                        sum_f[1] = __half22float2(sum[(gid << 1) + 1]);
                        sum_f[0].x += bias_f.x;
                        sum_f[0].y += bias_f.y;
                        sum_f[1].x += bias_f.x;
                        sum_f[1].y += bias_f.y;
                        if (add_gelu && gridDim.x == outputBlocks) {
                            sum_f[0].x = gelu(sum_f[0].x);
                            sum_f[0].y = gelu(sum_f[0].y);
                            sum_f[1].x = gelu(sum_f[1].x);
                            sum_f[1].y = gelu(sum_f[1].y);
                        }
                        sum[(gid << 1)] = __float22half2_rn(sum_f[0]);
                        // Bug fix: store the second row's value (was sum_f[0]).
                        sum[(gid << 1) + 1] = __float22half2_rn(sum_f[1]);
                    }
                    output_cast[col + (j + (gid << 1)) * (block_reduce * output_size)] =
                        (sum[(gid << 1)]);
                    if (((gid << 1) + j + 1) < input_size)
                        output_cast[col + (j + (gid << 1) + 1) * (block_reduce * output_size)] =
                            (sum[(gid << 1) + 1]);
                }
            }
        }
        // Rewind the weight pointer for the next input tile.
        weight_cast = reinterpret_cast<const __half2*>(weight);
        weight_cast += ((blockIdx.x / outputBlocks) * blockStride);
    }
#endif
}
// Combines the per-slice partial sums (FP32 path) produced when
// input_tiled_gemm_kernel_v2 split the reduction dimension across block_reduce
// block groups. Launched with block_reduce * WARP_SIZE threads, so
// warp_num == block_reduce: warp `gid` loads one slice, the partials of a
// column are regrouped into consecutive lanes of one warp, reduced with
// shuffles, optionally passed through GeLU, and written out by warp 0.
__global__ void block_reduce_kernel(float* output,
                                    float* block_sums,
                                    int batch,
                                    int output_size,
                                    bool add_gelu = false)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    unsigned total_count = batch * output_size;  // total float2 elements
    unsigned int gid = threadIdx.x >> 5;     // warp id == reduction-slice id
    unsigned int lane = threadIdx.x & 0x1f;
    unsigned int warp_num = blockDim.x >> 5; // equals block_reduce at launch
    float2* output_cast = reinterpret_cast<float2*>(output);
    float2* block_sums_cast = reinterpret_cast<float2*>(block_sums);
    unsigned int col_index = blockIdx.x * WARP_SIZE + lane;
    // Each warp reads the partial sums belonging to its own slice.
    block_sums_cast += gid * output_size;
    if (col_index < total_count) {
        // NOTE(review): declared with a (WARP_SIZE + 1) pitch but indexed with
        // a WARP_SIZE stride below — the padding only reserves extra space
        // here; confirm whether bank-conflict padding was intended.
        __shared__ float2 data_shared[MAX_WARP_NUM * (WARP_SIZE + 1)];
        data_shared[gid * (WARP_SIZE) + lane] =
            block_sums_cast[(col_index / output_size) * (warp_num * output_size) +
                            col_index % output_size];
        b.sync();
        // Regroup: the warp_num partials of a single column end up in
        // warp_num consecutive lanes, ready for a strided shuffle reduction.
        float2 data = data_shared[(lane % warp_num) * WARP_SIZE + gid * (WARP_SIZE / warp_num) +
                                  (lane / warp_num)];
        b.sync();
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            data.x += g.shfl_down(data.x, i);
            data.y += g.shfl_down(data.y, i);
        }
        // The lane holding a completed column stores it (optionally GeLU'd).
        if ((lane % warp_num) == 0) {
            if (add_gelu) {
                data.x = gelu(data.x);
                data.y = gelu(data.y);
            }
            data_shared[gid * (WARP_SIZE / warp_num) + (lane / warp_num)] = (data);
        }
        b.sync();
        // Warp 0 performs the final coalesced write.
        if (gid == 0) output_cast[col_index] = data_shared[lane];
    }
}
// FP16 twin of the float block_reduce_kernel above: combines the per-slice
// partial sums when the reduction dimension was split across block_reduce
// block groups. Accumulation is done in FP32 after converting the staged
// __half2 partials. Launched with block_reduce * WARP_SIZE threads, so
// warp_num == block_reduce.
__global__ void block_reduce_kernel(__half* output,
                                    __half* block_sums,
                                    unsigned batch,
                                    unsigned int output_size,
                                    bool add_gelu = false)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    unsigned total_count = batch * output_size;  // total __half2 elements
    unsigned int gid = threadIdx.x >> 5;     // warp id == reduction-slice id
    unsigned int lane = threadIdx.x & 0x1f;
    unsigned int warp_num = blockDim.x >> 5; // equals block_reduce at launch
    __half2* output_cast = reinterpret_cast<__half2*>(output);
    __half2* block_sums_cast = reinterpret_cast<__half2*>(block_sums);
    unsigned int col_index = blockIdx.x * WARP_SIZE + lane;
    // Each warp reads the partial sums belonging to its own slice.
    block_sums_cast += gid * output_size;
    if (col_index < total_count) {
        // NOTE(review): declared with a (WARP_SIZE + 1) pitch but indexed with
        // a WARP_SIZE stride below — confirm whether padding was intended.
        __shared__ __half2 data_shared[MAX_WARP_NUM * (WARP_SIZE + 1)];
        data_shared[gid * (WARP_SIZE) + lane] =
            block_sums_cast[(col_index / output_size) * (warp_num * output_size) +
                            col_index % output_size];
        b.sync();
        // Regroup so a column's warp_num partials land in consecutive lanes,
        // converting to FP32 for the accumulation.
        float2 data = __half22float2(data_shared[(lane % warp_num) * WARP_SIZE +
                                                 gid * (WARP_SIZE / warp_num) + (lane / warp_num)]);
        b.sync();
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            data.x += g.shfl_down(data.x, i);
            data.y += g.shfl_down(data.y, i);
        }
        // The lane holding a completed column stores it (optionally GeLU'd).
        if ((lane % warp_num) == 0) {
            if (add_gelu) {
                data.x = gelu(data.x);
                data.y = gelu(data.y);
            }
            data_shared[gid * (WARP_SIZE / warp_num) + (lane / warp_num)] = __float22half2_rn(data);
        }
        b.sync();
        // Warp 0 performs the final coalesced write.
        if (gid == 0) output_cast[col_index] = data_shared[lane];
    }
}
// Host launcher for the int8-quantized tiled GEMM (v2). When there are more
// SMs than output-column tiles, the reduction dimension is split across
// `block_reduce` groups of blocks (rounded down to a power of two so the
// kernel's shift-based arithmetic stays exact), and block_reduce_kernel
// combines the partial sums afterwards.
//
// Fix: block_reduce was recomputed via (int)pow(2.0, (float)br2) — a float
// round trip for an integer power of two; use a shift instead.
template <typename T>
void launch_input_tiled_gemm_kernel_v2(T* output,
                                       const T* vals,
                                       const int8_t* weight,
                                       const T* bias,
                                       unsigned int hidden_dim,
                                       unsigned int input_size,
                                       unsigned int output_size,
                                       float* scale,
                                       unsigned int groups,
                                       unsigned int merge_count,
                                       T* block_sums,
                                       bool add_gelu,
                                       hipStream_t stream)
{
    // The kernel operates on 2-wide vector elements; halve the column count.
    output_size /= 2;
    int outputBlocks = (output_size - 1) / WARP_SIZE + 1;
    int block_reduce = (SMs > outputBlocks ? SMs / outputBlocks : 1);
    int br2 = (int)log2(block_reduce);
    block_reduce = 1 << br2;  // exact power of two; no float pow() round-off
    constexpr int threads = 1024;
    int blockStride = (output_size * hidden_dim) / block_reduce;
    dim3 grid_dim(outputBlocks * block_reduce);
    dim3 block_dim(threads);
    hipLaunchKernelGGL((input_tiled_gemm_kernel_v2), dim3(grid_dim), dim3(block_dim), 0, stream,
                       output,
                       vals,
                       weight,
                       bias,
                       hidden_dim / block_reduce,
                       block_reduce,
                       input_size,
                       output_size,
                       outputBlocks,
                       blockStride,
                       scale,
                       groups,
                       block_sums,
                       merge_count,
                       // quantization stride: elements per quantization group
                       ((hidden_dim >> merge_count) * (output_size << 1)) / groups,
                       add_gelu);
    if (block_reduce > 1) {
        // Combine per-slice partials; one WARP_SIZE-wide column tile per block.
        dim3 grids(((output_size * input_size) - 1) / WARP_SIZE + 1);
        dim3 blocks(block_reduce * WARP_SIZE);
        hipLaunchKernelGGL((block_reduce_kernel), dim3(grids), dim3(blocks), 0, stream,
                           output, block_sums, input_size, (output_size), add_gelu);
    }
}
// Host launcher for the dense tiled GEMM (v2). Mirrors the int8 launcher:
// splits the reduction dimension across `block_reduce` block groups when SMs
// outnumber the column tiles, then combines partials with block_reduce_kernel.
//
// Fix: block_reduce was recomputed via (int)pow(2.0, (float)br2) — a float
// round trip for an integer power of two; use a shift instead.
template <typename T>
void launch_input_tiled_gemm_kernel_v2(T* output,
                                       const T* vals,
                                       const T* weight,
                                       const T* bias,
                                       T* block_sums,
                                       unsigned int hidden_dim,
                                       unsigned int input_size,
                                       unsigned int output_size,
                                       bool add_gelu,
                                       hipStream_t stream)
{
    // The kernel operates on 2-wide vector elements; halve the column count.
    output_size /= 2;
    int outputBlocks = (output_size - 1) / WARP_SIZE + 1;
    int block_reduce = (SMs > outputBlocks ? SMs / outputBlocks : 1);
    int br2 = (int)log2(block_reduce);
    block_reduce = 1 << br2;  // exact power of two; no float pow() round-off
    constexpr int threads = 1024;
    int blockStride = (output_size * hidden_dim) / block_reduce;
    dim3 grid_dim(outputBlocks * block_reduce);
    dim3 block_dim(threads);
    hipLaunchKernelGGL((input_tiled_gemm_kernel_v2), dim3(grid_dim), dim3(block_dim), 0, stream,
                       output,
                       vals,
                       weight,
                       bias,
                       block_sums,
                       hidden_dim / block_reduce,
                       block_reduce,
                       input_size,
                       output_size,
                       outputBlocks,
                       blockStride,
                       add_gelu);
    if (block_reduce > 1) {
        // Combine per-slice partials; one WARP_SIZE-wide column tile per block.
        dim3 grids(((output_size * input_size) - 1) / WARP_SIZE + 1);
        dim3 blocks(block_reduce * WARP_SIZE);
        hipLaunchKernelGGL((block_reduce_kernel), dim3(grids), dim3(blocks), 0, stream,
                           output, block_sums, input_size, (output_size), add_gelu);
    }
}
// Explicit instantiations of the v2 launchers: dense __half and float paths,
// plus the int8-quantized __half path.
template void launch_input_tiled_gemm_kernel_v2(__half* output,
                                                const __half* vals,
                                                const __half* weight,
                                                const __half* bias,
                                                __half* block_sums,
                                                unsigned int hidden_dim,
                                                unsigned int input_size,
                                                unsigned int output_size,
                                                bool add_gelu,
                                                hipStream_t stream);
template void launch_input_tiled_gemm_kernel_v2(float* output,
                                                const float* vals,
                                                const float* weight,
                                                const float* bias,
                                                float* block_sums,
                                                unsigned int hidden_dim,
                                                unsigned int input_size,
                                                unsigned int output_size,
                                                bool add_gelu,
                                                hipStream_t stream);
template void launch_input_tiled_gemm_kernel_v2(__half* output,
                                                const __half* vals,
                                                const int8_t* weight,
                                                const __half* bias,
                                                unsigned int hidden_dim,
                                                unsigned int input_size,
                                                unsigned int output_size,
                                                float* scale,
                                                unsigned int groups,
                                                unsigned int merge_count,
                                                __half* block_sums,
                                                bool add_gelu,
                                                hipStream_t stream);
// FP16 tiled GEMM over int8-quantized weights (v1, no reduction-dim split):
// output = vals * dequant(weight) (+ bias). Each block owns a WARP_SIZE-wide
// tile of output columns (in __half2 units); warps stride over the hidden
// dimension unpacking int8 weight pairs, scaling them by the per-group
// quantization scales staged in shared memory.
__global__ void input_tiled_gemm_kernel(__half* output,
                                        const __half* vals,
                                        const int8_t* weight,
                                        const __half* bias,
                                        int hidden_dim,
                                        int input_size,
                                        int output_size,
                                        float* qscale,
                                        int groups,
                                        int merge_count = 1)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;    // warp id within the block
    int lane = threadIdx.x & 0x1f; // lane id within the warp
    int warp_num = blockDim.x >> 5;
    __half2* output_cast = reinterpret_cast<__half2*>(output);
    const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
    // int16 view loads two packed int8 weights at once.
    const int16_t* weight_cast = reinterpret_cast<const int16_t*>(weight);
    int hidden_half = hidden_dim >> 1;
    int merge_hidden = hidden_dim >> merge_count;
    int quantization_stride = (merge_hidden * (output_size << 1)) / groups;
    // Stage all quantization scales in a small shared buffer.
    __shared__ __half shared_quantize_scale[MAX_QUANTIZE_GROUPING];
    if (threadIdx.x < (groups << merge_count))
        shared_quantize_scale[threadIdx.x] = __float2half(qscale[threadIdx.x]);
    __syncthreads();
    int col_index = blockIdx.x * WARP_SIZE + lane;
    for (int j = 0; j < input_size; j += (INPUT_TILE1)) {
        __half2 sum[INPUT_TILE1];
#pragma unroll
        for (int t = 0; t < INPUT_TILE1; t++) sum[t] = __float2half2_rn(0.f);
        {
            int wid = gid << 2;
            weight_cast += (wid * output_size + col_index);
            while (wid < hidden_dim) {
                // Pick the quantization scale pair for this (row, column).
                __half2 qscale_data;
                {
                    auto tmp = shared_quantize_scale[0];
                    qscale_data = __halves2half2(tmp, tmp);
                    if (groups > 1) {
                        unsigned index;
                        unsigned merge_index = wid / merge_hidden;
                        index =
                            (wid - merge_index * merge_hidden) + (col_index << 1) * merge_hidden;
                        qscale_data = __halves2half2(
                            shared_quantize_scale[((index / quantization_stride) << merge_count) +
                                                  merge_index],
                            shared_quantize_scale[(((index + merge_hidden) / quantization_stride)
                                                   << merge_count) +
                                                  merge_index]);
                    }
                }
                // Broadcast each input scalar into both halves of a __half2
                // so it can multiply a column pair of weights.
                __half2 vals_f[INPUT_TILE1 * loop_unroll];
#pragma unroll
                for (int t = 0; t < INPUT_TILE1; t++) {
                    __half2 val_h[loop_unroll >> 1];
                    val_h[0] = vals_cast[(j + t) * hidden_half + (wid >> 1)];
                    val_h[1] = vals_cast[(j + t) * hidden_half + (wid >> 1) + 1];
                    __half* inp_data[2];
                    inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
                    inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
                    vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
                    vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
                    vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
                    vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
                }
                if (col_index < output_size) {
                    int16_t weight_q[loop_unroll];
#pragma unroll
                    for (int k = 0; k < loop_unroll; k++)
                        weight_q[k] = weight_cast[k * output_size];
#pragma unroll
                    for (int t = 0; t < INPUT_TILE1; t++) {
#pragma unroll
                        for (int li = 0; li < loop_unroll; li++) {
                            // Dequantize the packed int8 pair and accumulate.
                            float2 weight_f;
                            int8_t* weight_8 = reinterpret_cast<int8_t*>(&weight_q[li]);
                            weight_f.x = (float)weight_8[0];
                            weight_f.y = (float)weight_8[1];
                            auto mul =
                                __float22half2_rn(weight_f) * qscale_data * vals_f[(t << 2) + li];
                            if (ACC_HALF)
                                sum[t] += mul;
                            else {
                                // FP32 accumulation path for accuracy.
                                float2 mul_f = __half22float2(mul);
                                float2 sum_f = __half22float2(sum[t]);
                                sum_f.x += mul_f.x;
                                sum_f.y += mul_f.y;
                                sum[t] = __float22half2_rn(sum_f);
                            }
                        }
                    }
                }
                wid += (warp_num << loop_unroll_bits);
                weight_cast += (output_size * (warp_num << loop_unroll_bits));
            }
        }
        {
            const __half2* bias_cast;
            if (bias) bias_cast = reinterpret_cast<const __half2*>(bias);
            __shared__ __half2 partial_result[MAX_WARP_NUM * (WARP_SIZE + 1)];
            for (int t = 0; t < INPUT_TILE1; t++) {
                // Transpose per-warp partials, then finish with warp shuffles.
                partial_result[gid * (WARP_SIZE + 1) + lane] = sum[t];
                __syncthreads();
                sum[t] = partial_result[lane * (WARP_SIZE + 1) + gid];
                if (ACC_HALF) {
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        // Shuffle the __half2 through its 32-bit float alias.
                        float* sum_f = reinterpret_cast<float*>(&sum[t]);
                        float temp = g.shfl_xor(*sum_f, i);
                        __half2* sum_h = reinterpret_cast<__half2*>(&temp);
                        sum[t] += *sum_h;
                    }
                } else {
                    float2 sum_f = __half22float2(sum[t]);
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        sum_f.x += g.shfl_xor(sum_f.x, i);
                        sum_f.y += g.shfl_xor(sum_f.y, i);
                    }
                    sum[t] = __float22half2_rn(sum_f);
                }
                if (lane == 0) { partial_result[gid] = sum[t]; }
                __syncthreads();
                // Warp t keeps row (j + t)'s reduced result in sum[0].
                if (gid == t) sum[0] = partial_result[lane];
            }
            if (gid < (INPUT_TILE1) && (gid + j) < input_size && col_index < output_size) {
                if (bias) {
                    float2 bias_f = __half22float2(bias_cast[col_index]);
                    float2 sum_f = __half22float2(sum[0]);
                    sum_f.x += bias_f.x;
                    sum_f.y += bias_f.y;
                    sum[0] = __float22half2_rn(sum_f);
                }
                output_cast[col_index + (j + gid) * output_size] = sum[0];
            }
        }
        // Rewind the weight pointer for the next input tile.
        weight_cast = reinterpret_cast<const int16_t*>(weight);
    }
}
// FP16 dense tiled GEMM (v1, no reduction-dim split): output = vals * weight
// (+ bias). Requires SM70+ (guarded by __CUDA_ARCH__). Each block owns a
// WARP_SIZE-wide tile of output columns (in __half2 units); warps stride over
// the hidden dimension, and partials are combined via a shared-memory
// transpose plus warp shuffles.
__global__ void input_tiled_gemm_kernel(__half* output,
                                        const __half* vals,
                                        const __half* weight,
                                        const __half* bias,
                                        int hidden_dim,
                                        int input_size,
                                        int output_size)
{
#if __CUDA_ARCH__ >= 700
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;    // warp id within the block
    int lane = threadIdx.x & 0x1f; // lane id within the warp
    int warp_num = blockDim.x >> 5;
    __half2* output_cast = reinterpret_cast<__half2*>(output);
    const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
    const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
    unsigned int col_index = blockIdx.x * WARP_SIZE + lane;
    int hidden_half = hidden_dim >> 1;
    for (int j = 0; j < input_size; j += (INPUT_TILE1)) {
        __half2 sum[INPUT_TILE1];
#pragma unroll
        for (int t = 0; t < (INPUT_TILE1); t++) { sum[t] = __float2half2_rn(0.f); }
        {
            int wid = gid << loop_unroll_bits;
            weight_cast += (wid * output_size + col_index);
            while (wid < hidden_dim) {
                // Broadcast each input scalar into both halves of a __half2 so
                // it can multiply a column pair of weights.
                __half2 vals_f[loop_unroll * (INPUT_TILE1)];
                {
                    for (int t = 0; t < (INPUT_TILE1); t++) {
                        if ((t + j) < input_size) {
                            __half2 val_h[2];
                            val_h[0] = vals_cast[(j + t) * hidden_half + (wid >> 1)];
                            val_h[1] = vals_cast[(j + t) * hidden_half + (wid >> 1) + 1];
                            __half* inp_data[2];
                            inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
                            inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
                            vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
                            vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
                            vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
                            vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
                        }
                    }
                }
                if (col_index < output_size) {
                    __half2 weight_h[loop_unroll];
#pragma unroll
                    for (int k = 0; k < loop_unroll; k++) {
                        // Zero-pad past the end of hidden_dim so the unrolled
                        // products below contribute nothing.
                        if ((k + wid) < hidden_dim)
                            weight_h[k] = weight_cast[k * output_size];
                        else
                            weight_h[k] = __float2half2_rn(0.f);
                    }
#pragma unroll
                    for (int k = 0; k < (loop_unroll >> inner_loop_unroll_bits); k++) {
#pragma unroll
                        for (int t = 0; t < (INPUT_TILE1); t++) {
                            if ((t + j) < input_size) {
#pragma unroll
                                for (int li = 0; li < inner_loop_unroll; li++) {
                                    // weight_h[0] doubles as a scratch register
                                    // for the product: at li == 0 it is read
                                    // before being overwritten, and later li
                                    // read weight_h[li] which is still intact.
                                    // NOTE(review): safe only if the outer k
                                    // loop runs once (loop_unroll ==
                                    // inner_loop_unroll) — confirm constants.
                                    weight_h[0] = (vals_f[(t << 2) + li] * weight_h[li]);
                                    if (ACC_HALF)
                                        sum[t] += weight_h[0];
                                    else {
                                        // FP32 accumulation path for accuracy.
                                        float2 weight_f = __half22float2(weight_h[0]);
                                        float2 sum_f = __half22float2(sum[t]);
                                        sum_f.x += weight_f.x;
                                        sum_f.y += weight_f.y;
                                        sum[t] = __float22half2_rn(sum_f);
                                    }
                                }
                            }
                        }
                    }
                }
                wid += (warp_num << loop_unroll_bits);
                weight_cast += (output_size * (warp_num << loop_unroll_bits));
            }
        }
        {
            const __half2* bias_cast;
            if (bias) bias_cast = reinterpret_cast<const __half2*>(bias);
            __shared__ __half2 partial_result[MAX_WARP_NUM * (WARP_SIZE + 1)];
            for (int t = 0; t < (INPUT_TILE1); t++) {
                if ((t + j) < input_size) {
                    __half2 sum_g = sum[t];
                    // Transpose per-warp partials, then reduce with shuffles
                    // in FP32.
                    partial_result[gid * (WARP_SIZE + 1) + lane] = sum[t];
                    b.sync();
                    float2 sum_f;
                    sum_f = __half22float2(partial_result[lane * (WARP_SIZE + 1) + gid]);
                    b.sync();
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        sum_f.x += g.shfl_xor(sum_f.x, i);
                        sum_f.y += g.shfl_xor(sum_f.y, i);
                    }
                    if (lane == 0) { partial_result[gid] = __float22half2_rn(sum_f); }
                    b.sync();
                    // Warp 0 applies the bias and writes row (j + t).
                    if (gid == 0) {
                        int col = blockIdx.x * WARP_SIZE + lane;
                        if (col < output_size) {
                            sum_g = partial_result[lane];
                            if (bias) {
                                float2 bias_f = __half22float2(bias_cast[col]);
                                sum_f = __half22float2(sum_g);
                                sum_f.x += bias_f.x;
                                sum_f.y += bias_f.y;
                                sum_g = __float22half2_rn(sum_f);
                            }
                            output_cast[col + (j + t) * output_size] = (sum_g);
                        }
                    }
                }
            }
        }
        // Rewind the weight pointer for the next input tile.
        weight_cast = reinterpret_cast<const __half2*>(weight);
    }
#endif
}
// FP32 dense tiled GEMM (v1, no reduction-dim split): output = vals * weight
// (+ bias). output_size is in float2 units. Each block owns a WARP_SIZE-wide
// tile of output columns; warps stride over the hidden dimension and the
// per-warp partials are combined via a shared-memory transpose plus warp
// shuffles, with warp 0 writing the final row.
__global__ void input_tiled_gemm_kernel(float* output,
                                        const float* vals,
                                        const float* weight,
                                        const float* bias,
                                        int hidden_dim,
                                        int input_size,
                                        int output_size)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;    // warp id within the block
    int lane = threadIdx.x & 0x1f; // lane id within the warp
    int warp_num = blockDim.x >> 5;
    float2* output_cast = reinterpret_cast<float2*>(output);
    const float2* vals_cast = reinterpret_cast<const float2*>(vals);
    const float2* weight_cast = reinterpret_cast<const float2*>(weight);
    int hidden_half = hidden_dim >> 1;
    for (int j = 0; j < input_size; j += (INPUT_TILE1)) {
        float2 sum[INPUT_TILE1];
#pragma unroll
        for (int t = 0; t < (INPUT_TILE1); t++) {
            sum[t].x = 0;
            sum[t].y = 0;
        }
        {
            // Each warp starts at a distinct pair of hidden rows and strides
            // by 2 * warp_num to cover the full reduction dimension.
            int wid = gid << 1;
            int offset = wid * output_size;
            while (wid < hidden_dim) {
                float2 val_data[INPUT_TILE1];
                {
                    for (int t = 0; t < INPUT_TILE1; t++) {
                        if ((t + j) < input_size) {
                            val_data[t] = vals_cast[(j + t) * hidden_half + (wid >> 1)];
                        }
                    }
                }
                int row = blockIdx.x * WARP_SIZE + lane;
                auto offset1 = offset + row;
                while (row < output_size) {
                    float2 weight[2];
                    weight[0] = weight_cast[offset1];
                    weight[1] = weight_cast[output_size + offset1];
                    for (int t = 0; t < INPUT_TILE1; t++) {
                        if ((t + j) < input_size) {
                            float2 mul[2];
                            mul[0].x = val_data[t].x * weight[0].x;
                            mul[0].y = val_data[t].x * weight[0].y;
                            mul[1].x = val_data[t].y * weight[1].x;
                            mul[1].y = val_data[t].y * weight[1].y;
                            sum[t].x += mul[0].x + mul[1].x;
                            sum[t].y += mul[0].y + mul[1].y;
                        }
                    }
                    row += (gridDim.x * WARP_SIZE);
                    offset1 += (gridDim.x * WARP_SIZE);
                }
                wid += warp_num * 2;
                offset += (output_size * warp_num * 2);
            }
        }
        {
            const float2* bias_cast;
            if (bias) bias_cast = reinterpret_cast<const float2*>(bias);
            // +1 column padding avoids bank conflicts during the transpose.
            __shared__ float2 partial_result[MAX_WARP_NUM * (WARP_SIZE + 1)];
            for (int t = 0; t < (INPUT_TILE1); t++) {
                if ((t + j) < input_size) {
                    float2 sum_g = sum[t];
                    // Transpose per-warp partials, then reduce with shuffles.
                    partial_result[gid * (WARP_SIZE + 1) + lane] = sum_g;
                    __syncthreads();
                    sum_g = partial_result[lane * (WARP_SIZE + 1) + gid];
                    __syncthreads();
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        sum_g.x += g.shfl_xor(sum_g.x, i);
                        sum_g.y += g.shfl_xor(sum_g.y, i);
                    }
                    if (lane == 0) { partial_result[gid] = sum_g; }
                    __syncthreads();
                    // Warp 0 applies the bias and writes row (j + t).
                    if (gid == 0) {
                        int col = blockIdx.x * WARP_SIZE + lane;
                        if (col < output_size) {
                            sum_g = partial_result[lane];
                            if (bias) {
                                float2 bias_f = bias_cast[col];
                                sum_g.x += bias_f.x;
                                sum_g.y += bias_f.y;
                            }
                            output_cast[col + (j + t) * output_size] = sum_g;
                        }
                    }
                }
            }
        }
    }
}
// Host launcher for the dense v1 tiled GEMM: one block per WARP_SIZE-wide tile
// of vector-2 output columns, 1024 threads per block.
template <typename T>
void launch_input_tiled_gemm_kernel(T* output,
                                    const T* vals,
                                    const T* weight,
                                    const T* bias,
                                    int hidden_dim,
                                    int input_size,
                                    int output_size,
                                    hipStream_t stream)
{
    constexpr int kThreadsPerBlock = 1024;
    // The kernel works on 2-wide vector elements, so halve the column count.
    output_size /= 2;
    const int num_blocks = (output_size - 1) / WARP_SIZE + 1;
    hipLaunchKernelGGL((input_tiled_gemm_kernel),
                       dim3(num_blocks),
                       dim3(kThreadsPerBlock),
                       0,
                       stream,
                       output,
                       vals,
                       weight,
                       bias,
                       hidden_dim,
                       input_size,
                       output_size);
}
// Explicit instantiations of the dense v1 launcher for float and __half.
template void launch_input_tiled_gemm_kernel(float* output,
                                             const float* vals,
                                             const float* weight,
                                             const float* bias,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size,
                                             hipStream_t stream);
template void launch_input_tiled_gemm_kernel(__half* output,
                                             const __half* vals,
                                             const __half* weight,
                                             const __half* bias,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size,
                                             hipStream_t stream);
// Host launcher for the int8-quantized v1 tiled GEMM: one block per
// WARP_SIZE-wide tile of vector-2 output columns, 1024 threads per block.
template <typename T>
void launch_input_tiled_gemm_kernel(T* output,
                                    const T* vals,
                                    const int8_t* weight,
                                    const T* bias,
                                    int hidden_dim,
                                    int input_size,
                                    int output_size,
                                    float* scale,
                                    int groups,
                                    int merge_count,
                                    hipStream_t stream)
{
    constexpr int kThreadsPerBlock = 1024;
    // The kernel works on 2-wide vector elements, so halve the column count.
    output_size /= 2;
    const int num_blocks = (output_size - 1) / WARP_SIZE + 1;
    hipLaunchKernelGGL((input_tiled_gemm_kernel),
                       dim3(num_blocks),
                       dim3(kThreadsPerBlock),
                       0,
                       stream,
                       output,
                       vals,
                       weight,
                       bias,
                       hidden_dim,
                       input_size,
                       output_size,
                       scale,
                       groups,
                       merge_count);
}
// Explicit instantiation of the int8-quantized v1 launcher for __half.
template void launch_input_tiled_gemm_kernel(__half* output,
                                             const __half* vals,
                                             const int8_t* weight,
                                             const __half* bias,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size,
                                             float* scale,
                                             int groups,
                                             int merge_count,
                                             hipStream_t stream);
// Fused GeLU + GEMM kernel, fp16 variant: applies GeLU to the input row while
// staging it into shared memory, then computes output = gelu(vals) * weight.
// Each block owns WARP_SIZE half2 output columns; warps stride over the hidden
// dimension and partial sums are exchanged through shared memory + shuffles.
// `bias` is accepted but not applied in this variant. Requires SM70+ / gfx
// equivalent (guarded by the arch check below).
// NOTE(review): only processes row j of each input tile (no inner loop over the
// tile) — presumably intended for INPUT_TILE-sized batching; confirm callers.
__global__ void tiled_gemm_kernel_gelu(__half* output,
const __half* vals,
const __half* weight,
const __half* bias,
int hidden_dim,
int input_size,
int output_size)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;    // warp index within the block
int lane = threadIdx.x & 0x1f; // lane index within the warp
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
__half2 inp_reg[8];
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
int input_tile = (input_size < INPUT_TILE ? input_size : INPUT_TILE);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += input_tile) {
// Staging buffer for one GeLU-activated input row (half2 elements).
__shared__ __half2 input_shared[9000];
{
// Cooperative load: each thread grabs strided half2 elements, applies
// GeLU in fp32, and writes the result to shared memory.
int k = 0;
int input_id = id;
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[j * hidden_half + input_id];
float2 inp_f = __half22float2(inp_reg[k]);
inp_f.x = gelu(inp_f.x);
inp_f.y = gelu(inp_f.y);
inp_reg[k] = __float22half2_rn(inp_f);
input_shared[input_id] = inp_reg[k++];
input_id += blockDim.x;
}
b.sync();
}
// Each warp handles 4 consecutive rows of the weight matrix per iteration.
int wid = gid << 2;
int offset = wid * output_size;
float2 sum;
sum.x = 0;
sum.y = 0;
while (wid < hidden_dim) {
__half2 vals_f[4];
{
// Broadcast the 4 scalar activations for this warp's weight rows
// into half2 pairs so they can multiply half2 weight vectors.
__half2 val_h[2];
val_h[0] = input_shared[(wid >> 1)];
val_h[1] = input_shared[(wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[0] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
// Multiply-accumulate 4 weight rows in fp32 for accuracy.
__half2 weight_h[4];
weight_h[0] = weight_cast[offset1];
weight_h[1] = weight_cast[output_size + offset1];
weight_h[2] = weight_cast[(output_size << 1) + offset1];
weight_h[3] = weight_cast[((output_size << 1) + output_size) + offset1];
{
float2 mul[4];
mul[0] = __half22float2(vals_f[0] * weight_h[0]);
mul[1] = __half22float2(vals_f[1] * weight_h[1]);
mul[2] = __half22float2(vals_f[2] * weight_h[2]);
mul[3] = __half22float2(vals_f[3] * weight_h[3]);
sum.x += mul[0].x + mul[1].x + mul[2].x + mul[3].x;
sum.y += mul[0].y + mul[1].y + mul[2].y + mul[3].y;
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 4;
offset += (output_size * warp_num * 4);
}
// Cross-warp reduction: transpose partials through shared memory (the +1
// padding avoids bank conflicts), then reduce across lanes with shuffles.
__shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
{
partial_result[gid][lane] = sum;
__syncthreads();
sum = partial_result[lane][gid];
__syncthreads();
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum.x += g.shfl_xor(sum.x, i);
sum.y += g.shfl_xor(sum.y, i);
}
if (lane == 0) { partial_result[0][gid] = sum; }
__syncthreads();
// Warp 0 writes the final half2 result for this block's output columns.
if (gid == 0) {
sum = partial_result[gid][lane];
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) { output_cast[j * output_size + col] = __float22half2_rn(sum); }
}
}
#endif
}
// Fused GeLU + GEMM kernel, fp32 variant: applies GeLU to the input row while
// staging it into shared memory, then computes output = gelu(vals) * weight.
// Data is processed as float2 vectors; each block owns WARP_SIZE float2 output
// columns and warps stride over the hidden dimension. `bias` is unused here.
__global__ void tiled_gemm_kernel_gelu(float* output,
const float* vals,
const float* weight,
const float* bias,
int hidden_dim,
int input_size,
int output_size)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;    // warp index within the block
int lane = threadIdx.x & 0x1f; // lane index within the warp
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float2 inp_reg[8];
float2* output_cast = reinterpret_cast<float2*>(output);
const float2* vals_cast = reinterpret_cast<const float2*>(vals);
const float2* weight_cast = reinterpret_cast<const float2*>(weight);
int input_tile = (input_size < INPUT_TILE ? input_size : INPUT_TILE);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += input_tile) {
// Staging buffer for one GeLU-activated input row (float2 elements).
__shared__ float2 input_shared[5000];
{
int k = 0;
int input_id = id;
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[j * hidden_half + input_id];
inp_reg[k].x = gelu(inp_reg[k].x);
inp_reg[k].y = gelu(inp_reg[k].y);
input_shared[input_id] = inp_reg[k++];
input_id += blockDim.x;
}
b.sync();
}
// Each warp handles 2 consecutive weight rows per iteration.
int wid = gid << 1;
int offset = wid * output_size;
float2 sum;
sum.x = 0;
sum.y = 0;
while (wid < hidden_dim) {
float2 val_data;
{
val_data = input_shared[wid >> 1];
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
float2 weight[2];
weight[0] = weight_cast[offset1];
weight[1] = weight_cast[output_size + offset1];
{
// mul[2]/mul[3] are declared but intentionally unused here.
float2 mul[4];
mul[0].x = val_data.x * weight[0].x;
mul[0].y = val_data.x * weight[0].y;
mul[1].x = val_data.y * weight[1].x;
mul[1].y = val_data.y * weight[1].y;
sum.x += mul[0].x + mul[1].x;
sum.y += mul[0].y + mul[1].y;
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 2;
offset += (output_size * warp_num * 2);
}
// Cross-warp reduction via shared-memory transpose + warp shuffles.
__shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
{
partial_result[gid][lane] = sum;
__syncthreads();
sum = partial_result[lane][gid];
__syncthreads();
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum.x += g.shfl_xor(sum.x, i);
sum.y += g.shfl_xor(sum.y, i);
}
if (lane == 0) { partial_result[0][gid] = sum; }
__syncthreads();
// Warp 0 writes the final float2 result for this block's output columns.
if (gid == 0) {
sum = partial_result[gid][lane];
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) { output_cast[(j)*output_size + col] = sum; }
}
}
}
template <typename T>
void launch_tiled_gemm_kernel_gelu(T* output,
const T* vals,
const T* weight,
const T* bias,
int hidden_dim,
int input_size,
int output_size,
hipStream_t stream)
{
constexpr int threads = 1024;
output_size /= 2;
dim3 grid_dim((output_size - 1) / WARP_SIZE + 1);
dim3 block_dim(threads);
hipLaunchKernelGGL(( input_tiled_gemm_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
output, vals, weight, bias, hidden_dim, input_size, output_size);
}
// Explicit instantiations of the GeLU + GEMM launcher for fp32 and fp16.
template void launch_tiled_gemm_kernel_gelu(float* output,
const float* vals,
const float* weight,
const float* bias,
int hidden_dim,
int input_size,
int output_size,
hipStream_t stream);
template void launch_tiled_gemm_kernel_gelu(__half* output,
const __half* vals,
const __half* weight,
const __half* bias,
int hidden_dim,
int input_size,
int output_size,
hipStream_t stream);
// Fused layer-norm + int8-weight GEMM kernel (fp16 activations).
// Per input-tile row: (1) loads the row, (2) computes mean/variance via warp
// and block reductions, (3) normalizes with gamma/beta into shared memory,
// (4) multiplies by group-quantized int8 weights (dequantized with qscale),
// (5) reduces partials across warps and adds `bias` before writing the output.
// Requires SM70+/gfx equivalent (arch guard below). Assumes groups <=
// MAX_QUANTIZE_GROUPING and blockDim.x >= groups for the scale preload.
__global__ void input_tiled_gemm_kernel(__half* output,
const __half* vals,
const int8_t* weight,
const __half* bias,
const __half* gamma,
const __half* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size,
float* qscale,
int groups)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;    // warp index within the block
int lane = threadIdx.x & 0x1f; // lane index within the warp
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
// Number of weight elements covered by one quantization group.
int quantization_stride = (hidden_dim * (output_size << 1)) / groups;
int col_index = blockIdx.x * WARP_SIZE + lane;
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
// Weights are read two int8s at a time through int16 loads.
const int16_t* weight_cast = reinterpret_cast<const int16_t*>(weight);
// used for quantization scaling factor
__shared__ __half shared_quantize_scale[MAX_QUANTIZE_GROUPING];
// reading all the quantization scale into a small shared buffer
if (threadIdx.x < groups)
shared_quantize_scale[threadIdx.x] = __float2half(qscale[threadIdx.x]);
__syncthreads();
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += INPUT_TILE) {
__shared__ __half2 input_shared[9000];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
__half2 inp_reg[8];
int k = 0;
int input_id = id;
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
input_shared[input_id + t * hidden_half] = inp_reg[k++];
input_id += blockDim.x;
}
b.sync();
// Block-wide sum of the row -> mean.
float sum = 0;
for (int f = k - 1; f >= 0; f--) {
float2 inp_f = __half22float2(inp_reg[f]);
sum += inp_f.x + inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
__shared__ float shr[MAX_WARP_NUM];
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
float mean = sum / hidden_dim;
sum = 0.f;
// Center the row and accumulate squared deviations -> variance.
for (int f = 0; f < k; f++) {
float2 inp_f = __half22float2(inp_reg[f]);
inp_f.x -= mean;
inp_f.y -= mean;
inp_reg[f] = __float22half2_rn(inp_f);
sum += inp_f.x * inp_f.x;
sum += inp_f.y * inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
sum /= hidden_dim;
sum += epsilon;
__half2 variance_h = __float2half2_rn(sum);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
// Normalize, scale by gamma, shift by beta, and publish to shared memory.
for (int f = 0; f < k; f++) {
int id = f * blockDim.x + threadIdx.x; // shadows the outer `id` on purpose
inp_reg[f] = inp_reg[f] * h2rsqrt(variance_h);
inp_reg[f] = inp_reg[f] * gamma_cast[id] + beta_cast[id];
input_shared[id + t * hidden_half] = inp_reg[f];
}
b.sync();
}
}
{
// GEMM phase: each warp walks 4 weight rows (loop_unroll) at a time.
int wid = gid << 2;
weight_cast += (wid * output_size + col_index);
__half2 sum[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) { sum[t] = __float2half2_rn(0.f); }
while (wid < hidden_dim) {
// updating the quantization scale
__half2 qscale_data;
{
auto tmp = shared_quantize_scale[0];
qscale_data = __halves2half2(tmp, tmp);
if (groups > 1) {
unsigned index;
index = wid + (col_index << 1) * hidden_dim;
qscale_data = __halves2half2(
shared_quantize_scale[index / quantization_stride],
shared_quantize_scale[(index + hidden_dim) / quantization_stride]);
}
}
// Broadcast the 4 activations for this warp's weight rows into half2 pairs.
__half2 vals_f[4 * INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
__half2 val_h[2];
val_h[0] = input_shared[t * hidden_half + (wid >> 1)];
val_h[1] = input_shared[t * hidden_half + (wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
if (col_index < output_size) {
int16_t weight_q[loop_unroll];
#pragma unroll
for (int k = 0; k < loop_unroll; k++)
weight_q[k] = weight_cast[k * output_size];
#pragma unroll
for (int t = 0; t < INPUT_TILE; t++) {
#pragma unroll
for (int li = 0; li < loop_unroll; li++) {
// Dequantize the int8 pair and accumulate (half or fp32 per ACC_HALF).
float2 weight_f;
int8_t* weight_8 = reinterpret_cast<int8_t*>(&weight_q[li]);
weight_f.x = (float)weight_8[0];
weight_f.y = (float)weight_8[1];
auto mul =
__float22half2_rn(weight_f) * qscale_data * vals_f[(t << 2) + li];
if (ACC_HALF)
sum[t] += mul;
else {
float2 mul_f = __half22float2(mul);
float2 sum_f = __half22float2(sum[t]);
sum_f.x += mul_f.x;
sum_f.y += mul_f.y;
sum[t] = __float22half2_rn(sum_f);
}
}
}
}
wid += (warp_num << loop_unroll_bits);
weight_cast += (output_size * (warp_num << loop_unroll_bits));
}
// Cross-warp reduction and output write, one tile row at a time.
__shared__ __half2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
const __half2* bias_cast = reinterpret_cast<const __half2*>(bias);
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
partial_result[gid][lane] = sum[t];
__syncthreads();
sum[t] = partial_result[lane][gid];
if (ACC_HALF) {
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
float* sum_f = reinterpret_cast<float*>(&sum[t]);
float temp = g.shfl_xor(*sum_f, i);
__half2* sum_h = reinterpret_cast<__half2*>(&temp);
sum[t] += *sum_h;
}
} else {
float2 sum_g = __half22float2(sum[t]);
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_g.x += g.shfl_xor(sum_g.x, i);
sum_g.y += g.shfl_xor(sum_g.y, i);
}
sum[t] = __float22half2_rn(sum_g);
}
// NOTE(review): stores/loads sum[0] inside the loop over t, while the
// reduction above produced sum[t]; this is only equivalent when
// INPUT_TILE == 1. The fp16-weight sibling kernel uses sum[t] here —
// confirm INPUT_TILE before relying on tiles > 1.
if (lane == 0) { partial_result[0][gid] = sum[0]; }
__syncthreads();
if (gid == 0) {
sum[0] = partial_result[0][lane];
if (col_index < output_size) {
float2 bias_f = __half22float2(bias_cast[col_index]);
float2 sum_g = __half22float2(sum[0]);
sum_g.x += bias_f.x;
sum_g.y += bias_f.y;
output_cast[(j + t) * output_size + col_index] =
__float22half2_rn(sum_g);
}
}
}
}
}
// Rewind the weight pointer for the next input tile.
weight_cast = reinterpret_cast<const int16_t*>(weight);
}
#endif
}
// Fused layer-norm + GEMM kernel (fp16 activations, fp16 weights).
// Per input-tile row: loads the row, computes mean/variance with warp/block
// reductions, normalizes with gamma/beta into shared memory, then each warp
// multiplies 4 weight rows at a time; partials are reduced across warps and
// `bias` is added before the fp16 output write. Requires SM70+/gfx equivalent.
__global__ void input_tiled_gemm_kernel(__half* output,
const __half* vals,
const __half* weight,
const __half* bias,
const __half* gamma,
const __half* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;    // warp index within the block
int lane = threadIdx.x & 0x1f; // lane index within the warp
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += INPUT_TILE) {
__shared__ __half2 input_shared[9000];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
__half2 inp_reg[8];
int k = 0;
int input_id = id; //(gid + warp_num * lane);
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
input_shared[input_id + t * hidden_half] = inp_reg[k++];
input_id += blockDim.x;
}
// b.sync();
// Block-wide sum of the row -> mean.
float sum = 0;
for (int f = k - 1; f >= 0; f--) {
float2 inp_f = __half22float2(inp_reg[f]);
sum += inp_f.x + inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
__shared__ float shr[MAX_WARP_NUM];
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
float mean = sum / hidden_dim;
sum = 0.f;
// Center the row and accumulate squared deviations -> variance.
for (int f = 0; f < k; f++) {
float2 inp_f = __half22float2(inp_reg[f]);
inp_f.x -= mean;
inp_f.y -= mean;
inp_reg[f] = __float22half2_rn(inp_f);
sum += inp_f.x * inp_f.x;
sum += inp_f.y * inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
sum /= hidden_dim;
sum += epsilon;
sum = __frsqrt_rn(sum);
__half2 variance_h = __float2half2_rn(sum);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
// Normalize, scale by gamma, shift by beta, publish to shared memory.
for (int f = 0; f < k; f++) {
int id = f * blockDim.x + threadIdx.x; // shadows the outer `id` on purpose
inp_reg[f] = inp_reg[f] * variance_h;
inp_reg[f] = inp_reg[f] * gamma_cast[id] + beta_cast[id];
input_shared[id + t * hidden_half] = inp_reg[f];
}
b.sync();
}
}
{
// GEMM phase: each warp walks 4 weight rows per iteration.
int wid = gid << 2;
int offset = wid * output_size;
float2 sum[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
sum[t].x = 0;
sum[t].y = 0;
}
while (wid < hidden_dim) {
// Broadcast the 4 activations for this warp's weight rows into half2 pairs.
__half2 vals_f[4 * INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
__half2 val_h[2];
val_h[0] = input_shared[t * hidden_half + (wid >> 1)];
val_h[1] = input_shared[t * hidden_half + (wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
// Multiply-accumulate 4 weight rows in fp32 for accuracy.
__half2 weight_h[4];
weight_h[0] = weight_cast[offset1];
weight_h[1] = weight_cast[output_size + offset1];
weight_h[2] = weight_cast[(output_size << 1) + offset1];
weight_h[3] = weight_cast[((output_size << 1) + output_size) + offset1];
#pragma unroll
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 mul[4];
mul[0] = __half22float2(vals_f[(t << 2)] * weight_h[0]);
mul[1] = __half22float2(vals_f[(t << 2) + 1] * weight_h[1]);
mul[2] = __half22float2(vals_f[(t << 2) + 2] * weight_h[2]);
mul[3] = __half22float2(vals_f[(t << 2) + 3] * weight_h[3]);
sum[t].x += mul[0].x + mul[1].x + mul[2].x + mul[3].x;
sum[t].y += mul[0].y + mul[1].y + mul[2].y + mul[3].y;
}
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 4;
offset += (output_size * warp_num * 4);
}
// Cross-warp reduction and output write, one tile row at a time.
__shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
const __half2* bias_cast = reinterpret_cast<const __half2*>(bias);
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 sum_g = sum[t];
partial_result[gid][lane] = sum_g;
__syncthreads();
sum_g = partial_result[lane][gid];
//__syncthreads();
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_g.x += g.shfl_xor(sum_g.x, i);
sum_g.y += g.shfl_xor(sum_g.y, i);
}
if (lane == 0) { partial_result[0][gid] = sum_g; }
__syncthreads();
if (gid == 0) {
sum_g = partial_result[0][lane];
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) {
float2 bias_f = __half22float2(bias_cast[col]);
sum_g.x += bias_f.x;
sum_g.y += bias_f.y;
output_cast[(j + t) * output_size + col] = __float22half2_rn(sum_g);
}
}
}
}
}
}
#endif
}
// Fused layer-norm + GEMM kernel (fp32). Mirrors the fp16 variant: per
// input-tile row it computes mean/variance via warp/block reductions,
// normalizes with gamma/beta into shared memory, multiplies by the weight
// matrix (2 rows per warp per iteration), reduces partials across warps,
// and adds `bias` before writing the float2 output.
__global__ void input_tiled_gemm_kernel(float* output,
const float* vals,
const float* weight,
const float* bias,
const float* gamma,
const float* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;    // warp index within the block
int lane = threadIdx.x & 0x1f; // lane index within the warp
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float2* output_cast = reinterpret_cast<float2*>(output);
const float2* vals_cast = reinterpret_cast<const float2*>(vals);
const float2* weight_cast = reinterpret_cast<const float2*>(weight);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += INPUT_TILE) {
__shared__ float2 input_shared[5000];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 inp_reg[8];
int k = 0; // Check if k goes from 0 to 7
int input_id = id; //(gid + warp_num * lane);
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
input_shared[input_id + t * hidden_half] = inp_reg[k++];
input_id += blockDim.x;
}
b.sync();
// Block-wide sum of the row -> mean.
float sum = 0;
for (int f = k - 1; f >= 0; f--) { sum += inp_reg[f].x + inp_reg[f].y; }
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
__shared__ float shr[MAX_WARP_NUM];
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
float mean = sum / hidden_dim;
sum = 0.f;
// Center the row and accumulate squared deviations -> variance.
for (int f = 0; f < k; f++) {
inp_reg[f].x -= mean;
inp_reg[f].y -= mean;
sum += inp_reg[f].x * inp_reg[f].x;
sum += inp_reg[f].y * inp_reg[f].y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
sum /= hidden_dim;
sum += epsilon;
sum = __frsqrt_rn(sum);
const float2* gamma_cast = reinterpret_cast<const float2*>(gamma);
const float2* beta_cast = reinterpret_cast<const float2*>(beta);
// Normalize, scale by gamma, shift by beta, publish to shared memory.
for (int f = 0; f < k; f++) {
int id = f * blockDim.x + threadIdx.x; // shadows the outer `id` on purpose
inp_reg[f].x = inp_reg[f].x * sum;
inp_reg[f].y = inp_reg[f].y * sum;
inp_reg[f].x = inp_reg[f].x * gamma_cast[id].x + beta_cast[id].x;
inp_reg[f].y = inp_reg[f].y * gamma_cast[id].y + beta_cast[id].y;
input_shared[id + t * hidden_half] = inp_reg[f];
}
b.sync();
}
}
{
// GEMM phase: each warp walks 2 weight rows per iteration.
int wid = gid << 1;
int offset = wid * output_size;
float2 sum[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
sum[t].x = 0;
sum[t].y = 0;
}
while (wid < hidden_dim) {
float2 val_data[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
val_data[t] = input_shared[t * hidden_half + (wid >> 1)];
}
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
float2 weight[2];
weight[0] = weight_cast[offset1];
weight[1] = weight_cast[output_size + offset1];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 mul[2];
mul[0].x = val_data[t].x * weight[0].x;
mul[0].y = val_data[t].x * weight[0].y;
mul[1].x = val_data[t].y * weight[1].x;
mul[1].y = val_data[t].y * weight[1].y;
sum[t].x += mul[0].x + mul[1].x;
sum[t].y += mul[0].y + mul[1].y;
}
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 2;
offset += (output_size * warp_num * 2);
}
// Cross-warp reduction and output write, one tile row at a time.
__shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
const float2* bias_cast = reinterpret_cast<const float2*>(bias);
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 sum_g = sum[t];
partial_result[gid][lane] = sum_g;
__syncthreads();
sum_g = partial_result[lane][gid];
//__syncthreads();
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_g.x += g.shfl_xor(sum_g.x, i);
sum_g.y += g.shfl_xor(sum_g.y, i);
}
if (lane == 0) { partial_result[0][gid] = sum_g; }
__syncthreads();
if (gid == 0) {
sum_g = partial_result[0][lane];
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) {
float2 bias_f = bias_cast[col];
sum_g.x += bias_f.x;
sum_g.y += bias_f.y;
output_cast[(j + t) * output_size + col] = sum_g;
}
}
}
}
}
}
}
template <typename T>
void launch_input_tiled_gemm_kernel(T* output,
const T* vals,
const T* weight,
const T* bias,
const T* gamma,
const T* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size,
hipStream_t stream)
{
constexpr int threads = 1024;
output_size /= 2;
dim3 grid_dim((output_size - 1) / WARP_SIZE + 1);
dim3 block_dim(threads);
hipLaunchKernelGGL(( input_tiled_gemm_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
output, vals, weight, bias, gamma, beta, epsilon, hidden_dim, input_size, output_size);
}
// Explicit instantiations of the layer-norm + GEMM launcher for fp32 and fp16.
template void launch_input_tiled_gemm_kernel(float* output,
const float* vals,
const float* weight,
const float* bias,
const float* gamma,
const float* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size,
hipStream_t stream);
template void launch_input_tiled_gemm_kernel(__half* output,
const __half* vals,
const __half* weight,
const __half* bias,
const __half* gamma,
const __half* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size,
hipStream_t stream);
template <typename T>
void launch_input_tiled_gemm_kernel(T* output,
const T* vals,
const int8_t* weight,
const T* bias,
const T* gamma,
const T* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size,
float* scale,
int groups,
hipStream_t stream)
{
constexpr int threads = 1024;
output_size /= 2;
dim3 grid_dim((output_size - 1) / WARP_SIZE + 1);
dim3 block_dim(threads);
hipLaunchKernelGGL(( input_tiled_gemm_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, output,
vals,
weight,
bias,
gamma,
beta,
epsilon,
hidden_dim,
input_size,
output_size,
scale,
groups);
}
// Explicit instantiation of the layer-norm + int8-weight launcher for fp16.
template void launch_input_tiled_gemm_kernel(__half* output,
const __half* vals,
const int8_t* weight,
const __half* bias,
const __half* gamma,
const __half* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size,
float* scale,
int groups,
hipStream_t stream);
// Fused residual-add + layer-norm + int8-weight GEMM + GeLU kernel (fp16).
// Per input-tile row: (1) computes vals + residual + input_bias and stores it
// to `residual_add`, (2) layer-normalizes the result with gamma/beta into
// shared memory, (3) multiplies by group-quantized int8 weights (dequantized
// via qscale), (4) reduces partials across warps, adds `bias`, applies GeLU,
// and writes the fp16 output. Requires SM70+/gfx equivalent (arch guard).
// Assumes groups <= MAX_QUANTIZE_GROUPING and blockDim.x >= groups.
__global__ void input_tiled_gemm_kernel_gelu(__half* output,
__half* residual_add,
const __half* vals,
const __half* residual,
const __half* input_bias,
const int8_t* weight,
const __half* bias,
const __half* gamma,
const __half* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size,
float* qscale,
int groups)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;    // warp index within the block
int lane = threadIdx.x & 0x1f; // lane index within the warp
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);
__half2* residual_add_cast = reinterpret_cast<__half2*>(residual_add);
// Weights are read two int8s at a time through int16 loads.
const int16_t* weight_cast = reinterpret_cast<const int16_t*>(weight);
const __half2* input_bias_cast = reinterpret_cast<const __half2*>(input_bias);
int hidden_half = hidden_dim >> 1;
// Number of weight elements covered by one quantization group.
int quantization_stride = (hidden_dim * (output_size << 1)) / groups;
__shared__ __half shared_quantize_scale[MAX_QUANTIZE_GROUPING];
// reading all the quantization scale into a small shared buffer
if (threadIdx.x < groups)
shared_quantize_scale[threadIdx.x] = __float2half(qscale[threadIdx.x]);
__syncthreads();
int col_index = blockIdx.x * WARP_SIZE + lane;
for (int j = 0; j < input_size; j += INPUT_TILE) {
__shared__ __half2 input_shared[9000];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
__half2 inp_reg[8];
int k = 0;
int input_id = id;
// Fused residual add: out = vals + residual + input_bias; the pre-norm
// sum is also persisted to residual_add for the next layer.
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
float2 inp_f = __half22float2(inp_reg[k]);
float2 residual_f =
__half22float2(residual_cast[(j + t) * hidden_half + input_id]);
float2 bias_f = __half22float2(input_bias_cast[input_id]);
inp_f.x += residual_f.x + bias_f.x;
inp_f.y += residual_f.y + bias_f.y;
inp_reg[k] = __float22half2_rn(inp_f);
residual_add_cast[(j + t) * hidden_half + input_id] = inp_reg[k];
input_shared[input_id + t * hidden_half] = inp_reg[k++];
input_id += blockDim.x;
}
b.sync();
// Block-wide sum of the row -> mean.
float sum = 0;
for (int f = k - 1; f >= 0; f--) {
float2 inp_f = __half22float2(inp_reg[f]);
sum += inp_f.x + inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
__shared__ float shr[MAX_WARP_NUM];
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
float mean = sum / hidden_dim;
sum = 0.f;
// Center the row and accumulate squared deviations -> variance.
for (int f = 0; f < k; f++) {
float2 inp_f = __half22float2(inp_reg[f]);
inp_f.x -= mean;
inp_f.y -= mean;
inp_reg[f] = __float22half2_rn(inp_f);
sum += inp_f.x * inp_f.x;
sum += inp_f.y * inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
sum /= hidden_dim;
sum += epsilon;
__half2 variance_h = __float2half2_rn(sum);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
// Normalize, scale by gamma, shift by beta, publish to shared memory.
for (int f = 0; f < k; f++) {
int id = f * blockDim.x + threadIdx.x; // shadows the outer `id` on purpose
inp_reg[f] = inp_reg[f] * h2rsqrt(variance_h);
inp_reg[f] = inp_reg[f] * gamma_cast[id] + beta_cast[id];
input_shared[id + t * hidden_half] = inp_reg[f];
}
b.sync();
}
}
// GEMM phase: each warp walks loop_unroll (4) weight rows per iteration.
int wid = gid << 2;
weight_cast += (wid * output_size + col_index);
__half2 sum[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) { sum[t] = __float2half2_rn(0.f); }
while (wid < hidden_dim) {
// updating the quantization scale
__half2 qscale_data;
{
auto tmp = shared_quantize_scale[0];
qscale_data = __halves2half2(tmp, tmp);
if (groups > 1) {
unsigned index;
index = wid + (col_index << 1) * hidden_dim;
qscale_data = __halves2half2(
shared_quantize_scale[((index / quantization_stride))],
shared_quantize_scale[((index + hidden_dim) / quantization_stride)]);
}
}
// Broadcast the 4 activations for this warp's weight rows into half2 pairs.
__half2 vals_f[INPUT_TILE * 4];
for (int t = 0; t < INPUT_TILE; t++) {
__half2 val_h[2];
val_h[0] = input_shared[t * hidden_half + (wid >> 1)];
val_h[1] = input_shared[t * hidden_half + (wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
if (col_index < output_size) {
int16_t weight_q[loop_unroll];
#pragma unroll
for (int k = 0; k < loop_unroll; k++) weight_q[k] = weight_cast[k * output_size];
#pragma unroll
for (int t = 0; t < INPUT_TILE; t++) {
#pragma unroll
for (int li = 0; li < loop_unroll; li++) {
// Dequantize the int8 pair and accumulate (half or fp32 per ACC_HALF).
int8_t* weight_8 = reinterpret_cast<int8_t*>(&weight_q[li]);
float2 weight_f;
weight_f.x = (float)weight_8[0];
weight_f.y = (float)weight_8[1];
auto mul =
__float22half2_rn(weight_f) * qscale_data * vals_f[(t << 2) + li];
if (ACC_HALF)
sum[t] += mul;
else {
float2 mul_f = __half22float2(mul);
float2 sum_f = __half22float2(sum[t]);
sum_f.x += mul_f.x;
sum_f.y += mul_f.y;
sum[t] = __float22half2_rn(sum_f);
}
}
}
}
wid += (warp_num << loop_unroll_bits);
weight_cast += ((warp_num << loop_unroll_bits) * output_size);
}
{
// Cross-warp reduction, bias add, GeLU, and output write per tile row.
__shared__ __half2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
const __half2* bias_cast = reinterpret_cast<const __half2*>(bias);
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
partial_result[gid][lane] = sum[t];
__syncthreads();
sum[t] = partial_result[lane][gid];
if (ACC_HALF) {
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
float* sum_f = reinterpret_cast<float*>(&sum[t]);
float temp = g.shfl_xor(*sum_f, i);
__half2* sum_h = reinterpret_cast<__half2*>(&temp);
sum[t] += *sum_h;
}
} else {
float2 sum_g = __half22float2(sum[t]);
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_g.x += g.shfl_xor(sum_g.x, i);
sum_g.y += g.shfl_xor(sum_g.y, i);
}
sum[t] = __float22half2_rn(sum_g);
}
if (lane == 0) { partial_result[0][gid] = sum[t]; }
__syncthreads();
if (gid == 0) {
if (col_index < output_size) {
float2 sum_g = __half22float2(partial_result[0][lane]);
float2 bias_f = __half22float2(bias_cast[col_index]);
sum_g.x = bias_f.x + sum_g.x;
sum_g.y = bias_f.y + sum_g.y;
sum_g.x = gelu(sum_g.x);
sum_g.y = gelu(sum_g.y);
output_cast[(j + t) * output_size + col_index] =
__float22half2_rn(sum_g);
}
}
}
}
}
// Rewind the weight pointer for the next input tile.
weight_cast = reinterpret_cast<const int16_t*>(weight);
}
#endif
}
__global__ void input_tiled_gemm_kernel_gelu(__half* output,
__half* residual_add,
const __half* vals,
const __half* residual,
const __half* input_bias,
const __half* weight,
const __half* bias,
const __half* gamma,
const __half* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);
__half2* residual_add_cast = reinterpret_cast<__half2*>(residual_add);
const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
const __half2* input_bias_cast = reinterpret_cast<const __half2*>(input_bias);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += INPUT_TILE) {
__shared__ __half2 input_shared[9000];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
__half2 inp_reg[8];
int k = 0;
int input_id = id;
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
float2 inp_f = __half22float2(inp_reg[k]);
float2 residual_f =
__half22float2(residual_cast[(j + t) * hidden_half + input_id]);
float2 bias_f = __half22float2(input_bias_cast[input_id]);
inp_f.x += residual_f.x + bias_f.x;
inp_f.y += residual_f.y + bias_f.y;
inp_reg[k] = __float22half2_rn(inp_f);
residual_add_cast[(j + t) * hidden_half + input_id] = inp_reg[k];
input_shared[input_id + t * hidden_half] = inp_reg[k++];
input_id += blockDim.x;
}
// b.sync();
float sum = 0;
for (int f = k - 1; f >= 0; f--) {
float2 inp_f = __half22float2(inp_reg[f]);
sum += inp_f.x + inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
__shared__ float shr[MAX_WARP_NUM];
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
float mean = sum / hidden_dim;
sum = 0.f;
for (int f = 0; f < k; f++) {
float2 inp_f = __half22float2(inp_reg[f]);
inp_f.x -= mean;
inp_f.y -= mean;
inp_reg[f] = __float22half2_rn(inp_f);
sum += inp_f.x * inp_f.x;
sum += inp_f.y * inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
sum /= hidden_dim;
sum += epsilon;
sum = __frsqrt_rn(sum);
__half2 variance_h = __float2half2_rn(sum);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
for (int f = 0; f < k; f++) {
int tid = f * blockDim.x + threadIdx.x;
inp_reg[f] = inp_reg[f] * variance_h;
inp_reg[f] = inp_reg[f] * gamma_cast[tid] + beta_cast[tid];
input_shared[tid + t * hidden_half] = inp_reg[f];
// output_cast[(j + t) * hidden_half + tid] = inp_reg[f];
}
b.sync();
}
}
int wid = gid << 2;
int offset = wid * output_size;
float2 sum[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
sum[t].x = 0;
sum[t].y = 0;
}
while (wid < hidden_dim) {
__half2 vals_f[INPUT_TILE * 4];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
__half2 val_h[2];
val_h[0] = input_shared[t * hidden_half + (wid >> 1)];
val_h[1] = input_shared[t * hidden_half + (wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
__half2 weight_h[4];
weight_h[0] = weight_cast[offset1];
weight_h[1] = weight_cast[output_size + offset1];
weight_h[2] = weight_cast[(output_size << 1) + offset1];
weight_h[3] = weight_cast[((output_size << 1) + output_size) + offset1];
#pragma unroll
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 mul[4];
mul[0] = __half22float2(vals_f[(t << 2)] * weight_h[0]);
mul[1] = __half22float2(vals_f[(t << 2) + 1] * weight_h[1]);
mul[2] = __half22float2(vals_f[(t << 2) + 2] * weight_h[2]);
mul[3] = __half22float2(vals_f[(t << 2) + 3] * weight_h[3]);
sum[t].x += mul[0].x + mul[1].x + mul[2].x + mul[3].x;
sum[t].y += mul[0].y + mul[1].y + mul[2].y + mul[3].y;
}
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 4;
offset += (output_size * warp_num * 4);
}
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 sum_g = sum[t];
__shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
const __half2* bias_cast = reinterpret_cast<const __half2*>(bias);
{
partial_result[gid][lane] = sum_g;
__syncthreads();
sum_g = partial_result[lane][gid];
//__syncthreads();
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_g.x += g.shfl_xor(sum_g.x, i);
sum_g.y += g.shfl_xor(sum_g.y, i);
}
if (lane == 0) { partial_result[0][gid] = sum_g; }
__syncthreads();
if (gid == 0) {
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) {
sum_g = partial_result[0][lane];
float2 bias_f = __half22float2(bias_cast[col]);
sum_g.x = bias_f.x + sum_g.x;
sum_g.y = bias_f.y + sum_g.y;
sum_g.x = gelu(sum_g.x);
sum_g.y = gelu(sum_g.y);
output_cast[(j + t) * output_size + col] = __float22half2_rn(sum_g);
}
}
}
}
}
#endif
}
// Fused fp32 kernel: (vals + residual + input_bias) -> layer-norm -> GEMM
// against `weight` -> + bias -> GeLU -> `output`.
// The pre-normalization sum is also written back to `residual_add`.
//
// Expectations (see launch_input_tiled_gemm_kernel_gelu):
//  - all pointers reference float2-packable (even-sized) buffers;
//  - `output_size` arrives already divided by 2 (float2-packed columns),
//    while `hidden_dim` is the full logical size (halved locally);
//  - blockDim.x == 1024; the grid tiles the packed output width in
//    WARP_SIZE-column chunks.
//  - INPUT_TILE * (hidden_dim / 2) must fit in input_shared (5000 float2)
//    and hidden_dim / 2 <= 8 * blockDim.x (inp_reg has 8 slots).
__global__ void input_tiled_gemm_kernel_gelu(float* output,
                                             float* residual_add,
                                             const float* vals,
                                             const float* residual,
                                             const float* input_bias,
                                             const float* weight,
                                             const float* bias,
                                             const float* gamma,
                                             const float* beta,
                                             const float epsilon,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;       // warp id within the block
    int lane = threadIdx.x & 0x1f;    // lane id within the warp
    int warp_num = blockDim.x >> 5;   // warps per block
    int id = threadIdx.x;
    // All global traffic is done on float2-packed views.
    float2* output_cast = reinterpret_cast<float2*>(output);
    const float2* vals_cast = reinterpret_cast<const float2*>(vals);
    const float2* residual_cast = reinterpret_cast<const float2*>(residual);
    float2* residual_add_cast = reinterpret_cast<float2*>(residual_add);
    const float2* weight_cast = reinterpret_cast<const float2*>(weight);
    const float2* input_bias_cast = reinterpret_cast<const float2*>(input_bias);
    int hidden_half = hidden_dim >> 1;
    for (int j = 0; j < input_size; j += INPUT_TILE) {
        // Normalized input rows for this tile, shared by the GEMM phase below.
        __shared__ float2 input_shared[5000];
        for (int t = 0; t < INPUT_TILE; t++) {
            // (t + j) < input_size is uniform across the block (t, j,
            // input_size are all block-uniform), so the b.sync() calls
            // inside this branch are reached by every thread.
            if ((t + j) < input_size) {
                // Phase 1: strided load of row (j+t); add residual and input
                // bias, publish the sum to residual_add and shared memory.
                float2 inp_reg[8];
                int k = 0;
                int input_id = id;
                while (input_id < hidden_half) {
                    inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
                    float2 residual_f = residual_cast[(j + t) * hidden_half + input_id];
                    float2 bias_f = input_bias_cast[input_id];
                    inp_reg[k].x += residual_f.x + bias_f.x;
                    inp_reg[k].y += residual_f.y + bias_f.y;
                    residual_add_cast[(j + t) * hidden_half + input_id] = inp_reg[k];
                    input_shared[input_id + t * hidden_half] = inp_reg[k++];
                    input_id += blockDim.x;
                }
                b.sync();
                // Phase 2: block-wide mean (warp shuffle reduce, then a
                // cross-warp reduce through `shr`, broadcast from lane 0).
                float sum = 0;
                for (int f = k - 1; f >= 0; f--) { sum += inp_reg[f].x + inp_reg[f].y; }
                for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
                __shared__ float shr[MAX_WARP_NUM];
                if (g.thread_rank() == 0) shr[gid] = sum;
                b.sync();
                if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
                for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
                sum = g.shfl(sum, 0);
                float mean = sum / hidden_dim;
                // Phase 3: variance, same reduction shape as the mean.
                sum = 0.f;
                for (int f = 0; f < k; f++) {
                    inp_reg[f].x -= mean;
                    inp_reg[f].y -= mean;
                    sum += inp_reg[f].x * inp_reg[f].x;
                    sum += inp_reg[f].y * inp_reg[f].y;
                }
                for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
                if (g.thread_rank() == 0) shr[gid] = sum;
                b.sync();
                if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
                for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
                sum = g.shfl(sum, 0);
                sum /= hidden_dim;
                sum += epsilon;
                sum = __frsqrt_rn(sum);  // sum now holds 1/sqrt(var + eps)
                // Phase 4: scale/shift by gamma/beta and refresh the shared tile.
                const float2* gamma_cast = reinterpret_cast<const float2*>(gamma);
                const float2* beta_cast = reinterpret_cast<const float2*>(beta);
                for (int f = 0; f < k; f++) {
                    int id = f * blockDim.x + threadIdx.x;  // shadows outer `id` on purpose
                    inp_reg[f].x = inp_reg[f].x * sum;
                    inp_reg[f].y = inp_reg[f].y * sum;
                    inp_reg[f].x = inp_reg[f].x * gamma_cast[id].x + beta_cast[id].x;
                    inp_reg[f].y = inp_reg[f].y * gamma_cast[id].y + beta_cast[id].y;
                    input_shared[id + t * hidden_half] = inp_reg[f];
                }
                b.sync();
            }
        }
        // Phase 5: GEMM. Each warp owns a pair of hidden rows (wid, wid+1);
        // lanes stride over packed output columns.
        int wid = gid << 1;
        int offset = wid * output_size;
        float2 sum[INPUT_TILE];
        for (int t = 0; t < INPUT_TILE; t++) {
            sum[t].x = 0;
            sum[t].y = 0;
        }
        while (wid < hidden_dim) {
            float2 val_data[INPUT_TILE];
            for (int t = 0; t < INPUT_TILE; t++) {
                if ((t + j) < input_size) {
                    val_data[t] = input_shared[t * hidden_half + (wid >> 1)];
                }
            }
            int row = blockIdx.x * WARP_SIZE + lane;
            auto offset1 = offset + row;
            while (row < output_size) {
                float2 weight[2];
                weight[0] = weight_cast[offset1];
                weight[1] = weight_cast[output_size + offset1];
                for (int t = 0; t < INPUT_TILE; t++) {
                    if ((t + j) < input_size) {
                        float2 mul[2];
                        mul[0].x = val_data[t].x * weight[0].x;
                        mul[0].y = val_data[t].x * weight[0].y;
                        mul[1].x = val_data[t].y * weight[1].x;
                        mul[1].y = val_data[t].y * weight[1].y;
                        sum[t].x += mul[0].x + mul[1].x;
                        sum[t].y += mul[0].y + mul[1].y;
                    }
                }
                row += (gridDim.x * WARP_SIZE);
                offset1 += (gridDim.x * WARP_SIZE);
            }
            wid += warp_num * 2;
            offset += (output_size * warp_num * 2);
        }
        // Phase 6: fold per-warp partials (transpose through shared memory,
        // xor-shuffle reduce), then warp 0 adds bias, applies GeLU and stores.
        for (int t = 0; t < INPUT_TILE; t++) {
            if ((t + j) < input_size) {
                float2 sum_g = sum[t];
                __shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
                const float2* bias_cast = reinterpret_cast<const float2*>(bias);
                {
                    partial_result[gid][lane] = sum_g;
                    __syncthreads();
                    sum_g = partial_result[lane][gid];
                    __syncthreads();
                }
#pragma unroll
                for (int i = 1; i < WARP_SIZE; i *= 2) {
                    sum_g.x += g.shfl_xor(sum_g.x, i);
                    sum_g.y += g.shfl_xor(sum_g.y, i);
                }
                if (lane == 0) { partial_result[0][gid] = sum_g; }
                __syncthreads();
                if (gid == 0) {
                    int col = blockIdx.x * WARP_SIZE + lane;
                    if (col < output_size) {
                        sum_g = partial_result[0][lane];
                        float2 bias_f = bias_cast[col];
                        sum_g.x = bias_f.x + sum_g.x;
                        sum_g.y = bias_f.y + sum_g.y;
                        sum_g.x = gelu(sum_g.x);
                        sum_g.y = gelu(sum_g.y);
                        output_cast[(j + t) * output_size + col] = sum_g;
                    }
                }
            }
        }
    }
}
// Host-side launcher for the fused residual/bias/layer-norm + GEMM + GeLU
// kernel. The kernel consumes vector-2 packed elements, so it is handed half
// of the logical output width; the grid tiles that packed width in
// WARP_SIZE-column chunks with 1024 threads per block.
template <typename T>
void launch_input_tiled_gemm_kernel_gelu(T* output,
                                         T* residual_add,
                                         const T* vals,
                                         const T* residual,
                                         const T* input_bias,
                                         const T* weight,
                                         const T* bias,
                                         const T* gamma,
                                         const T* beta,
                                         const float epsilon,
                                         int hidden_dim,
                                         int input_size,
                                         int output_size,
                                         hipStream_t stream)
{
    constexpr int kThreadsPerBlock = 1024;
    const int packed_output = output_size / 2;  // vector-2 packed columns
    const dim3 grid((packed_output - 1) / WARP_SIZE + 1);
    const dim3 block(kThreadsPerBlock);
    hipLaunchKernelGGL((input_tiled_gemm_kernel_gelu),
                       grid,
                       block,
                       0,
                       stream,
                       output,
                       residual_add,
                       vals,
                       residual,
                       input_bias,
                       weight,
                       bias,
                       gamma,
                       beta,
                       epsilon,
                       hidden_dim,
                       input_size,
                       packed_output);
}
// Explicit instantiations of the fused GEMM+GeLU launcher for the two
// activation types this library compiles (fp32 and fp16).
template void launch_input_tiled_gemm_kernel_gelu(float* output,
                                                  float* residual_add,
                                                  const float* vals,
                                                  const float* residual,
                                                  const float* input_bias,
                                                  const float* weight,
                                                  const float* bias,
                                                  const float* gamma,
                                                  const float* beta,
                                                  const float epsilon,
                                                  int hidden_dim,
                                                  int input_size,
                                                  int output_size,
                                                  hipStream_t stream);
template void launch_input_tiled_gemm_kernel_gelu(__half* output,
                                                  __half* residual_add,
                                                  const __half* vals,
                                                  const __half* residual,
                                                  const __half* input_bias,
                                                  const __half* weight,
                                                  const __half* bias,
                                                  const __half* gamma,
                                                  const __half* beta,
                                                  const float epsilon,
                                                  int hidden_dim,
                                                  int input_size,
                                                  int output_size,
                                                  hipStream_t stream);
// Host-side launcher for the fused GEMM + GeLU path with int8-quantized
// weights (dequantized in-kernel via `scale` / `groups`). Same launch shape
// as the full-precision overload: 1024 threads per block over the vector-2
// packed output width.
template <typename T>
void launch_input_tiled_gemm_kernel_gelu(T* output,
                                         T* residual_add,
                                         const T* vals,
                                         const T* residual,
                                         const T* input_bias,
                                         const int8_t* weight,
                                         const T* bias,
                                         const T* gamma,
                                         const T* beta,
                                         const float epsilon,
                                         int hidden_dim,
                                         int input_size,
                                         int output_size,
                                         float* scale,
                                         int groups,
                                         hipStream_t stream)
{
    constexpr int kThreadsPerBlock = 1024;
    const int packed_output = output_size / 2;  // vector-2 packed columns
    const dim3 grid((packed_output - 1) / WARP_SIZE + 1);
    const dim3 block(kThreadsPerBlock);
    hipLaunchKernelGGL((input_tiled_gemm_kernel_gelu),
                       grid,
                       block,
                       0,
                       stream,
                       output,
                       residual_add,
                       vals,
                       residual,
                       input_bias,
                       weight,
                       bias,
                       gamma,
                       beta,
                       epsilon,
                       hidden_dim,
                       input_size,
                       packed_output,
                       scale,
                       groups);
}
// Explicit instantiation of the int8-weight GEMM+GeLU launcher; only fp16
// activations are supported on this path.
template void launch_input_tiled_gemm_kernel_gelu(__half* output,
                                                  __half* residual_add,
                                                  const __half* vals,
                                                  const __half* residual,
                                                  const __half* input_bias,
                                                  const int8_t* weight,
                                                  const __half* bias,
                                                  const __half* gamma,
                                                  const __half* beta,
                                                  const float epsilon,
                                                  int hidden_dim,
                                                  int input_size,
                                                  int output_size,
                                                  float* scale,
                                                  int groups,
                                                  hipStream_t stream);
| f06a08ec86366817c8322674e234481e761b3668.cu |
#include <limits>
#include "custom_cuda_layers.h"
#include <cuda_profiler_api.h>
#include <cstdio>
#include <cstdlib>
#include <ctime>
namespace cg = cooperative_groups;
#define INPUT_TILE 1
#define INPUT_TILE1 1
// Input tile used in the gemm kernel v2
#define INPUT_TILE2 10
#define MAX_REG_SIZE 20
#define WARP_SIZE 32
#define MAX_WARP_NUM 32
#define MAX_BLOCK_SUM 8
#define loop_unroll 4
#define loop_unroll_bits 2
#define inner_loop_unroll 4
#define inner_loop_unroll_bits 2
#define INT8WIDTH 2
#define MAX_QUANTIZE_GROUPING 1024
#define ACC_HALF true
// Tanh-approximation GeLU activation, evaluated in single precision:
//   gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
// All literals carry the `f` suffix so the whole expression stays in float
// arithmetic; the previous double literals (0.5, 1.0, ...) silently promoted
// every intermediate to double, which is markedly slower on most GPUs for no
// accuracy benefit in an fp16/fp32 inference path.
inline __device__ float gelu(const float x)
{
    float y = 0.5f * x * (1.0f + tanhf(0.7978845608028654f * x * (1.0f + 0.044715f * x * x)));
    return y;
}
// Tiled GEMM (v2) with int8-quantized weights and __half activations:
//   out = vals x dequant(weight) (+ bias, optional GeLU).
// Weights are dequantized on the fly: each packed int8 pair is widened to
// __half2 and multiplied by a per-group scale read from `qscale` (cached in
// shared memory).
//
// The hidden dimension may be partitioned across `block_reduce` groups of
// blocks (blockIdx.x / outputBlocks selects the partition); in that case each
// partition writes its partial result to `block_sums` and block_reduce_kernel
// combines them. Bias and GeLU are applied only on the final (unpartitioned)
// pass, i.e. when gridDim.x == outputBlocks.
// `quantization_stride` is the number of weight elements per quantization
// group as computed by the launcher.
__global__ void input_tiled_gemm_kernel_v2(__half* output,
                                           const __half* vals,
                                           const int8_t* weight,
                                           const __half* bias,
                                           unsigned hidden_dim,
                                           unsigned block_reduce,
                                           unsigned input_size,
                                           unsigned output_size,
                                           unsigned outputBlocks,
                                           unsigned blockStride,
                                           float* qscale,
                                           unsigned groups,
                                           __half* block_sums,
                                           unsigned merge_count = 1,
                                           unsigned quantization_stride = 1,
                                           bool add_gelu = false)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp id
    int lane = threadIdx.x & 0x1f;   // lane id
    int warp_num = blockDim.x >> 5;  // warps per block
    // When the grid is split along hidden_dim, partials go to block_sums.
    __half2* output_cast =
        reinterpret_cast<__half2*>(((gridDim.x == outputBlocks) ? output : block_sums));
    const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
    const int16_t* weight_cast = reinterpret_cast<const int16_t*>(weight);
    // Advance all base pointers to this block's hidden-dim partition.
    output_cast += ((blockIdx.x / outputBlocks) * (output_size));
    weight_cast += ((blockIdx.x / outputBlocks) * blockStride);
    vals_cast += (unsigned)(blockIdx.x / outputBlocks) * (hidden_dim >> 1);
    // reading all the quantization scale into a small shared buffer
    __shared__ __half shared_quantize_scale[MAX_QUANTIZE_GROUPING];
    int merge_hidden = hidden_dim >> merge_count;
    if (threadIdx.x < (groups << merge_count))
        shared_quantize_scale[threadIdx.x] = __float2half(qscale[threadIdx.x]);
    __syncthreads();
    for (int j = 0; j < input_size; j += (INPUT_TILE2)) {
        __half2 sum[INPUT_TILE2];
#pragma unroll
        for (int t = 0; t < INPUT_TILE2; t++) sum[t] = __float2half2_rn(0.f);
        {
            // Each warp walks hidden_dim in strides of 4 rows (loop_unroll).
            int wid = gid << 2;
            weight_cast += (wid * output_size + (blockIdx.x % outputBlocks) * WARP_SIZE + lane);
            while (wid < hidden_dim) {
                // updating the quantization scale for the current rows/column
                __half2 qscale_data;
                {
                    auto tmp = shared_quantize_scale[0];
                    qscale_data = __halves2half2(tmp, tmp);
                    if (groups > 1) {
                        unsigned index = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                        unsigned merge_index = wid / merge_hidden;
                        index = (wid - merge_index * merge_hidden) + (index << 1) * merge_hidden;
                        qscale_data = __halves2half2(
                            shared_quantize_scale[((index / quantization_stride) << merge_count) +
                                                  merge_index],
                            shared_quantize_scale[(((index + merge_hidden) / quantization_stride)
                                                   << merge_count) +
                                                  merge_index]);
                    }
                }
                // Read the input
                __shared__ __half2 vals_h[(loop_unroll >> 1) * INPUT_TILE2 * MAX_WARP_NUM];
                {
                    // we read (loop_unroll >> 2) half-2 values per lane, and for 2 times of the
                    // INPUT_TILE this makes more threads engaged in reading data from shared memory
                    // into registers!
                    if (lane < (INPUT_TILE2 << 1)) {
                        if (((lane >> 1) + j) < input_size) {
                            // here, we consider loop_unroll is always higher that 4!
                            unsigned int inp_id = ((lane % 2) << (loop_unroll_bits - 2));
                            unsigned int offset =
                                (j + (lane >> 1)) * (block_reduce * (hidden_dim >> 1)) + inp_id;
#pragma unroll
                            for (int li = 0; li < (loop_unroll >> 2); li++) {
                                vals_h[li + inp_id + (((lane >> 1) << (loop_unroll_bits - 1))) +
                                       (gid << (loop_unroll_bits - 1)) * INPUT_TILE2] =
                                    vals_cast[offset + (wid >> 1) + li];
                            }
                        }
                    }
                    g.sync();
                }
                int col_index = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                if (col_index < output_size) {
                    // Dequantize 4 weight rows for this column: int8 pair ->
                    // __half2, scaled by qscale_data.
                    __half2 weight_h[loop_unroll];
                    {
                        int16_t weight_q[loop_unroll];
#pragma unroll
                        for (int k = 0; k < loop_unroll; k++)
                            if ((k + wid) < hidden_dim) weight_q[k] = weight_cast[k * output_size];
#pragma unroll
                        for (int k = 0; k < loop_unroll; k++) {
                            int8_t* weight_8 = reinterpret_cast<int8_t*>(&weight_q[k]);
                            weight_h[k] = __halves2half2(__float2half((float)weight_8[0]),
                                                         __float2half((float)weight_8[1])) *
                                          qscale_data;
                        }
                    }
                    // matrix-matrix multiply (half or float accumulation
                    // depending on ACC_HALF)
#pragma unroll
                    for (int t = 0; t < INPUT_TILE2; t++) {
                        if ((t + j) < input_size) {
#pragma unroll
                            for (int li = 0; li < loop_unroll; li++) {
                                __half* val_h = reinterpret_cast<__half*>(
                                    &vals_h[(t << (loop_unroll_bits - 1)) + (li >> 1) +
                                            (gid << (loop_unroll_bits - 1)) * INPUT_TILE2]);
                                auto mul =
                                    weight_h[li] * __halves2half2(val_h[li % 2], val_h[li % 2]);
                                if (ACC_HALF)
                                    sum[t] += mul;
                                else {
                                    float2 mul_f = __half22float2(mul);
                                    float2 sum_f = __half22float2(sum[t]);
                                    sum_f.x += mul_f.x;
                                    sum_f.y += mul_f.y;
                                    sum[t] = __float22half2_rn(sum_f);
                                }
                            }
                        }
                    }
                }
                wid += (warp_num << loop_unroll_bits);
                weight_cast += (output_size * (warp_num << loop_unroll_bits));
            }
        }
        // Cross-warp reduction: transpose partials through shared memory
        // (two tile rows at a time), xor-shuffle reduce within each warp.
        const __half2* bias_cast;
        if (bias) bias_cast = reinterpret_cast<const __half2*>(bias);
        __shared__ __half2 partial_result[2 * MAX_WARP_NUM * (WARP_SIZE + 2)];
        for (int t = 0; t < INPUT_TILE2; t += 2) {
            if ((t + j) < input_size) {
                partial_result[(gid << 1) * (WARP_SIZE + 2) + (lane << 1)] = sum[t];
                partial_result[(gid << 1) * (WARP_SIZE + 2) + (lane << 1) + 1] = sum[t + 1];
                b.sync();
                if (ACC_HALF) {
                    sum[t] = partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1)];
                    sum[t + 1] = partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1) + 1];
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        // Shuffle the __half2 pair through its float bit-image.
                        float temp[2];
                        float* sum_f[2];
                        __half2* sum_h[2];
                        sum_f[0] = reinterpret_cast<float*>(&sum[t]);
                        sum_f[1] = reinterpret_cast<float*>(&sum[t + 1]);
                        temp[0] = g.shfl_xor(*sum_f[0], i);
                        temp[1] = g.shfl_xor(*sum_f[1], i);
                        sum_h[0] = reinterpret_cast<__half2*>(&temp[0]);
                        sum_h[1] = reinterpret_cast<__half2*>(&temp[1]);
                        sum[t] += *sum_h[0];
                        sum[t + 1] += *sum_h[1];
                    }
                    if (lane == 0) {
                        partial_result[(gid << 1)] = sum[t];
                        partial_result[(gid << 1) + 1] = sum[t + 1];
                    }
                } else {
                    float2 sum_f[2];
                    sum_f[0] =
                        __half22float2(partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1)]);
                    sum_f[1] = __half22float2(
                        partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1) + 1]);
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        sum_f[0].x += g.shfl_xor(sum_f[0].x, i);
                        sum_f[0].y += g.shfl_xor(sum_f[0].y, i);
                        sum_f[1].x += g.shfl_xor(sum_f[1].x, i);
                        sum_f[1].y += g.shfl_xor(sum_f[1].y, i);
                    }
                    if (lane == 0) {
                        partial_result[(gid << 1)] = __float22half2_rn(sum_f[0]);
                        partial_result[(gid << 1) + 1] = __float22half2_rn(sum_f[1]);
                    }
                }
                b.sync();
                // Warp t/2 keeps the finished pair of tile rows in sum[0..1].
                if (gid == (t >> 1)) {
                    sum[0] = partial_result[(lane << 1)];
                    sum[1] = partial_result[(lane << 1) + 1];
                }
            }
        }
        // Output: warp w writes tile rows 2w and 2w+1; bias/GeLU only on the
        // final (non-partitioned) pass.
        if ((gid << 1) < INPUT_TILE2 && ((gid << 1) + j) < input_size) {
            int col_index = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
            if (col_index < output_size) {
                if (bias && blockIdx.x < outputBlocks) {
                    __half2 bias_h = bias_cast[col_index];
                    float2 bias_f = __half22float2(bias_h);
                    float2 sum_f[2];
                    sum_f[0] = __half22float2(sum[0]);
                    sum_f[1] = __half22float2(sum[1]);
                    sum_f[0].x += bias_f.x;
                    sum_f[0].y += bias_f.y;
                    sum_f[1].x += bias_f.x;
                    sum_f[1].y += bias_f.y;
                    if (add_gelu && gridDim.x == outputBlocks) {
                        sum_f[0].x = gelu(sum_f[0].x);
                        sum_f[1].x = gelu(sum_f[1].x);
                        sum_f[0].y = gelu(sum_f[0].y);
                        sum_f[1].y = gelu(sum_f[1].y);
                    }
                    sum[0] = __float22half2_rn(sum_f[0]);
                    sum[1] = __float22half2_rn(sum_f[1]);
                }
                output_cast[col_index + (j + (gid << 1)) * (block_reduce * output_size)] = (sum[0]);
                if ((input_size - ((gid << 1) + j)) > 1)
                    output_cast[col_index + (j + (gid << 1) + 1) * (block_reduce * output_size)] =
                        (sum[1]);
            }
        }
        // Rewind the weight pointer (it was advanced inside the GEMM loop).
        weight_cast = reinterpret_cast<const int16_t*>(weight);
        weight_cast += ((blockIdx.x / outputBlocks) * blockStride);
    }
}
// Tiled GEMM (v2), fp32 path: out = vals x weight (+ bias, optional GeLU).
// The hidden dimension may be partitioned across `block_reduce` groups of
// blocks (blockIdx.x / outputBlocks selects the partition); each partition
// then writes a partial result to `block_sums` for block_reduce_kernel to
// combine. Bias/GeLU are applied only on the final (unpartitioned) pass,
// i.e. when gridDim.x == outputBlocks. `output_size` is the float2-packed
// (halved) width; `hidden_dim` is the per-partition row count.
__global__ void input_tiled_gemm_kernel_v2(float* output,
                                           const float* vals,
                                           const float* weight,
                                           const float* bias,
                                           float* block_sums,
                                           int hidden_dim,
                                           int block_reduce,
                                           int input_size,
                                           int output_size,
                                           int outputBlocks,
                                           int blockStride,
                                           bool add_gelu = false)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    unsigned int gid = threadIdx.x >> 5;      // warp id
    unsigned int lane = threadIdx.x & 0x1f;   // lane id
    int warp_num = blockDim.x >> 5;           // warps per block
    // When the grid is split along hidden_dim, partials go to block_sums.
    float2* output_cast =
        reinterpret_cast<float2*>(((gridDim.x == outputBlocks) ? output : block_sums));
    const float2* vals_cast = reinterpret_cast<const float2*>(vals);
    const float2* weight_cast = reinterpret_cast<const float2*>(weight);
    // Advance base pointers to this block's hidden-dim partition.
    output_cast += (unsigned)(blockIdx.x / outputBlocks) * (output_size);
    int hidden_half = hidden_dim >> 1;
    weight_cast += ((unsigned)(blockIdx.x / outputBlocks) * blockStride);
    vals_cast += (unsigned)(blockIdx.x / outputBlocks) * hidden_half;
    for (int j = 0; j < input_size; j += (INPUT_TILE2)) {
        float2 sum[INPUT_TILE2];
#pragma unroll
        for (int t = 0; t < (INPUT_TILE2); t++) {
            sum[t].x = 0;
            sum[t].y = 0;
        }
        {
            // Each warp owns a pair of hidden rows; lanes stride over packed
            // output columns of this block's column tile.
            int wid = gid << 1;
            int offset = wid * output_size;
            while (wid < hidden_dim) {
                float2 val_data[INPUT_TILE2];
                {
                    for (int t = 0; t < INPUT_TILE2; t++) {
                        if ((t + j) < input_size) {
                            val_data[t] =
                                vals_cast[(j + t) * (hidden_half * block_reduce) + (wid >> 1)];
                        }
                    }
                }
                int row = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                auto offset1 = offset + row;
                while (row < output_size) {
                    float2 weight[2];
                    weight[0] = weight_cast[offset1];
                    weight[1] = weight_cast[output_size + offset1];
                    for (int t = 0; t < INPUT_TILE2; t++) {
                        if ((t + j) < input_size) {
                            float2 mul[2];
                            mul[0].x = val_data[t].x * weight[0].x;
                            mul[0].y = val_data[t].x * weight[0].y;
                            mul[1].x = val_data[t].y * weight[1].x;
                            mul[1].y = val_data[t].y * weight[1].y;
                            sum[t].x += mul[0].x + mul[1].x;
                            sum[t].y += mul[0].y + mul[1].y;
                        }
                    }
                    row += (gridDim.x * WARP_SIZE);
                    offset1 += (gridDim.x * WARP_SIZE);
                }
                wid += warp_num * 2;
                offset += (output_size * warp_num * 2);
            }
        }
        {
            // Cross-warp reduction: transpose partials through shared memory,
            // xor-shuffle reduce, then warp `t` holds tile row t in sum[t].
            const float2* bias_cast;
            if (bias) bias_cast = reinterpret_cast<const float2*>(bias);
            __shared__ float2 partial_result[MAX_WARP_NUM * (WARP_SIZE + 1)];
            for (int t = 0; t < (INPUT_TILE2); t++) {
                if ((t + j) < input_size) {
                    float2 sum_g = sum[t];
                    partial_result[gid * (WARP_SIZE + 1) + lane] = sum_g;
                    __syncthreads();
                    sum_g = partial_result[lane * (WARP_SIZE + 1) + gid];
                    __syncthreads();
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        sum_g.x += g.shfl_xor(sum_g.x, i);
                        sum_g.y += g.shfl_xor(sum_g.y, i);
                    }
                    if (lane == 0) { partial_result[gid] = sum_g; }
                    __syncthreads();
                    sum[t] = partial_result[lane];
                }
            }
            // Warp `gid` writes tile row gid; bias/GeLU only on the final pass.
            if (gid < INPUT_TILE2 && ((gid + j) < input_size)) {
                int col = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                if (col < output_size) {
                    if (bias && blockIdx.x < outputBlocks) {
                        float2 bias_f = bias_cast[col];
                        sum[gid].x += bias_f.x;
                        sum[gid].y += bias_f.y;
                        if (add_gelu && gridDim.x == outputBlocks) {
                            sum[gid].x = gelu(sum[gid].x);
                            sum[gid].y = gelu(sum[gid].y);
                        }
                    }
                    output_cast[col + (j + gid) * (output_size * block_reduce)] = sum[gid];
                }
            }
        }
    }
}
// Tiled GEMM (v2), fp16 path: out = vals x weight (+ bias, optional GeLU).
// The hidden dimension may be partitioned across `block_reduce` groups of
// blocks (blockIdx.x / outputBlocks selects the partition); each partition
// writes a partial result to `block_sums`, later combined by
// block_reduce_kernel. Bias/GeLU are applied only on the final
// (unpartitioned) pass, i.e. when gridDim.x == outputBlocks.
// `output_size` is the __half2-packed (halved) width; `hidden_dim` is the
// per-partition row count. Requires SM70+ (guarded below).
//
// Fix: the second tile row's bias+GeLU result was previously overwritten
// with the first row's value (`sum_f[0]` stored twice); it now stores
// `sum_f[1]`, matching the fp32 and int8 variants of this kernel.
__global__ void input_tiled_gemm_kernel_v2(__half* output,
                                           const __half* vals,
                                           const __half* weight,
                                           const __half* bias,
                                           __half* block_sums,
                                           unsigned int hidden_dim,
                                           unsigned int block_reduce,
                                           unsigned int input_size,
                                           unsigned int output_size,
                                           unsigned int outputBlocks,
                                           unsigned int blockStride,
                                           bool add_gelu = false)
{
#if __CUDA_ARCH__ >= 700
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    unsigned int gid = threadIdx.x >> 5;      // warp id
    unsigned int lane = threadIdx.x & 0x1f;   // lane id
    int warp_num = blockDim.x >> 5;           // warps per block
    // When the grid is split along hidden_dim, partials go to block_sums.
    __half2* output_cast =
        reinterpret_cast<__half2*>(((gridDim.x == outputBlocks) ? output : block_sums));
    const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
    const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
    // Advance base pointers to this block's hidden-dim partition.
    output_cast += (unsigned)(blockIdx.x / outputBlocks) * (output_size);
    int hidden_half = hidden_dim >> 1;
    weight_cast += ((unsigned)(blockIdx.x / outputBlocks) * blockStride);
    vals_cast += (unsigned)(blockIdx.x / outputBlocks) * hidden_half;
    for (int j = 0; j < input_size; j += (INPUT_TILE2)) {
        __half2 sum[INPUT_TILE2];
#pragma unroll
        for (int t = 0; t < INPUT_TILE2; t++) { sum[t] = __float2half2_rn(0.f); }
        {
            // Each warp walks hidden_dim in strides of loop_unroll rows.
            int wid = gid << loop_unroll_bits;
            weight_cast += wid * output_size + (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
            while (wid < hidden_dim) {
                __shared__ __half2 vals_h[(loop_unroll >> 1) * INPUT_TILE2 * MAX_WARP_NUM];
                {
                    // we read (loop_unroll >> 2) half-2 values per lane, and for 2 times of the
                    // INPUT_TILE this makes more threads engaged in reading data from shared memory
                    // into registers!
                    if (lane < (INPUT_TILE2 << 1)) {
                        if (((lane >> 1) + j) < input_size) {
                            // here, we consider loop_unroll is always higher that 4!
                            unsigned int inp_id = ((lane % 2) << (loop_unroll_bits - 2));
                            unsigned int offset =
                                (j + (lane >> 1)) * (block_reduce * (hidden_dim >> 1)) + inp_id;
#pragma unroll
                            for (int li = 0; li < (loop_unroll >> 2); li++) {
                                vals_h[li + inp_id + (((lane >> 1) << (loop_unroll_bits - 1))) +
                                       (gid << (loop_unroll_bits - 1)) * INPUT_TILE2] =
                                    vals_cast[offset + (wid >> 1) + li];
                            }
                        }
                    }
                    g.sync();
                }
                int col = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                if (col < output_size) {
                    __half2 weight_h[loop_unroll];
#pragma unroll
                    for (int k = 0; k < loop_unroll; k++)
                        weight_h[k] = weight_cast[output_size * k];
                    // matrix-matrix multiply (half or float accumulation
                    // depending on ACC_HALF)
#pragma unroll
                    for (int t = 0; t < INPUT_TILE2; t++) {
                        float2 sum_f;
                        if (!ACC_HALF) sum_f = __half22float2(sum[t]);
#pragma unroll
                        for (int li = 0; li < (loop_unroll >> 1); li++) {
                            __half* inp_data = reinterpret_cast<__half*>(
                                &vals_h[(t << (loop_unroll_bits - 1)) + li +
                                        (gid << (loop_unroll_bits - 1)) * INPUT_TILE2]);
#pragma unroll
                            for (int k = 0; k < 2; k++) {
                                if (ACC_HALF)
                                    sum[t] += __halves2half2(inp_data[k], inp_data[k]) *
                                              weight_h[(li << 1) + k];
                                else {
                                    float2 weight_f =
                                        __half22float2(__halves2half2(inp_data[k], inp_data[k]) *
                                                       weight_h[(li << 1) + k]);
                                    sum_f.x += weight_f.x;
                                    sum_f.y += weight_f.y;
                                }
                            }
                        }
                        if (!ACC_HALF) sum[t] = __float22half2_rn(sum_f);
                    }
                }
                wid += warp_num << loop_unroll_bits;
                weight_cast += (output_size * (warp_num << loop_unroll_bits));
            }
        }
        {
            // Cross-warp reduction: transpose partials through shared memory
            // (two tile rows at a time), xor-shuffle reduce within each warp.
            const __half2* bias_cast;
            if (bias) bias_cast = reinterpret_cast<const __half2*>(bias);
            __shared__ __half2 partial_result[2 * MAX_WARP_NUM * (WARP_SIZE + 2)];
            for (int t = 0; t < INPUT_TILE2; t += 2) {
                if ((t + j) < input_size) {
                    partial_result[(gid << 1) * (WARP_SIZE + 2) + (lane << 1)] = sum[t];
                    partial_result[(gid << 1) * (WARP_SIZE + 2) + (lane << 1) + 1] = sum[t + 1];
                    b.sync();
                    float2 sum_f[2];
                    sum_f[0] =
                        __half22float2(partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1)]);
                    sum_f[1] = __half22float2(
                        partial_result[(lane << 1) * (WARP_SIZE + 2) + (gid << 1) + 1]);
#pragma unroll
                    for (int i = 1; i < WARP_SIZE; i *= 2) {
                        sum_f[0].x += g.shfl_xor(sum_f[0].x, i);
                        sum_f[1].y += g.shfl_xor(sum_f[1].y, i);
                        sum_f[1].x += g.shfl_xor(sum_f[1].x, i);
                        sum_f[0].y += g.shfl_xor(sum_f[0].y, i);
                    }
                    if (lane == 0) {
                        partial_result[(gid << 1)] = __float22half2_rn(sum_f[0]);
                        partial_result[(gid << 1) + 1] = __float22half2_rn(sum_f[1]);
                    }
                    b.sync();
                    // Warp t/2 keeps the finished pair of tile rows.
                    if (gid == (t >> 1)) {
                        sum[t] = partial_result[(lane << 1)];
                        sum[t + 1] = partial_result[(lane << 1) + 1];
                    }
                }
            }
            // Output: warp w writes tile rows 2w and 2w+1; bias/GeLU only on
            // the final (non-partitioned) pass.
            if ((gid << 1) < INPUT_TILE2 && ((gid << 1) + j) < input_size) {
                int col = (blockIdx.x % outputBlocks) * WARP_SIZE + lane;
                if (col < output_size) {
                    if (bias && blockIdx.x < outputBlocks) {
                        __half2 bias_h = bias_cast[col];
                        float2 bias_f = __half22float2(bias_h);
                        float2 sum_f[2];
                        sum_f[0] = __half22float2(sum[(gid << 1)]);
                        sum_f[1] = __half22float2(sum[(gid << 1) + 1]);
                        sum_f[0].x += bias_f.x;
                        sum_f[0].y += bias_f.y;
                        sum_f[1].x += bias_f.x;
                        sum_f[1].y += bias_f.y;
                        if (add_gelu && gridDim.x == outputBlocks) {
                            sum_f[0].x = gelu(sum_f[0].x);
                            sum_f[0].y = gelu(sum_f[0].y);
                            sum_f[1].x = gelu(sum_f[1].x);
                            sum_f[1].y = gelu(sum_f[1].y);
                        }
                        sum[(gid << 1)] = __float22half2_rn(sum_f[0]);
                        // BUGFIX: was __float22half2_rn(sum_f[0]), which
                        // dropped the second row's bias+GeLU result.
                        sum[(gid << 1) + 1] = __float22half2_rn(sum_f[1]);
                    }
                    output_cast[col + (j + (gid << 1)) * (block_reduce * output_size)] =
                        (sum[(gid << 1)]);
                    if (((gid << 1) + j + 1) < input_size)
                        output_cast[col + (j + (gid << 1) + 1) * (block_reduce * output_size)] =
                            (sum[(gid << 1) + 1]);
                }
            }
        }
        // Rewind the weight pointer (it was advanced inside the GEMM loop).
        weight_cast = reinterpret_cast<const __half2*>(weight);
        weight_cast += ((blockIdx.x / outputBlocks) * blockStride);
    }
#endif
}
// Combines the per-partition partial sums produced by the split
// input_tiled_gemm_kernel_v2 passes: each output element is the sum of
// `warp_num` (= blockDim.x / 32 = block_reduce) partials stored in
// `block_sums`, with an optional GeLU applied to the combined value.
// `output_size` here is the float2-packed width; one thread handles one
// packed element.
// NOTE(review): b.sync() below sits inside `if (col_index < total_count)`,
// which is lane/block dependent — blocks covering the tail of the range may
// have threads that skip the barrier. Verify total_count is always a
// multiple of the block coverage, or hoist the syncs out of the branch.
__global__ void block_reduce_kernel(float* output,
                                    float* block_sums,
                                    int batch,
                                    int output_size,
                                    bool add_gelu = false)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    unsigned total_count = batch * output_size;
    unsigned int gid = threadIdx.x >> 5;      // warp id == partition index
    unsigned int lane = threadIdx.x & 0x1f;
    unsigned int warp_num = blockDim.x >> 5;  // number of partitions to fold
    float2* output_cast = reinterpret_cast<float2*>(output);
    float2* block_sums_cast = reinterpret_cast<float2*>(block_sums);
    unsigned int col_index = blockIdx.x * WARP_SIZE + lane;
    block_sums_cast += gid * output_size;
    if (col_index < total_count) {
        __shared__ float2 data_shared[MAX_WARP_NUM * (WARP_SIZE + 1)];
        // Stage partials so that the warp_num values belonging to one output
        // element end up adjacent within a single warp.
        data_shared[gid * (WARP_SIZE) + lane] =
            block_sums_cast[(col_index / output_size) * (warp_num * output_size) +
                            col_index % output_size];
        b.sync();
        float2 data = data_shared[(lane % warp_num) * WARP_SIZE + gid * (WARP_SIZE / warp_num) +
                                  (lane / warp_num)];
        b.sync();
        // Fold the warp_num partials with shuffles (warp_num is a power of 2).
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            data.x += g.shfl_down(data.x, i);
            data.y += g.shfl_down(data.y, i);
        }
        if ((lane % warp_num) == 0) {
            if (add_gelu) {
                data.x = gelu(data.x);
                data.y = gelu(data.y);
            }
            data_shared[gid * (WARP_SIZE / warp_num) + (lane / warp_num)] = (data);
        }
        b.sync();
        if (gid == 0) output_cast[col_index] = data_shared[lane];
    }
}
// fp16 variant of block_reduce_kernel: folds `warp_num` per-partition
// partials from `block_sums` into `output` (accumulating in float), with an
// optional GeLU on the combined value. `output_size` is the __half2-packed
// width; one thread handles one packed element.
// NOTE(review): as in the fp32 variant, b.sync() is inside a
// lane/block-dependent branch — confirm no block straddles total_count.
__global__ void block_reduce_kernel(__half* output,
                                    __half* block_sums,
                                    unsigned batch,
                                    unsigned int output_size,
                                    bool add_gelu = false)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    unsigned total_count = batch * output_size;
    unsigned int gid = threadIdx.x >> 5;      // warp id == partition index
    unsigned int lane = threadIdx.x & 0x1f;
    unsigned int warp_num = blockDim.x >> 5;  // number of partitions to fold
    __half2* output_cast = reinterpret_cast<__half2*>(output);
    __half2* block_sums_cast = reinterpret_cast<__half2*>(block_sums);
    unsigned int col_index = blockIdx.x * WARP_SIZE + lane;
    block_sums_cast += gid * output_size;
    if (col_index < total_count) {
        __shared__ __half2 data_shared[MAX_WARP_NUM * (WARP_SIZE + 1)];
        // Stage partials so that the warp_num values belonging to one output
        // element end up adjacent within a single warp.
        data_shared[gid * (WARP_SIZE) + lane] =
            block_sums_cast[(col_index / output_size) * (warp_num * output_size) +
                            col_index % output_size];
        b.sync();
        float2 data = __half22float2(data_shared[(lane % warp_num) * WARP_SIZE +
                                                 gid * (WARP_SIZE / warp_num) + (lane / warp_num)]);
        b.sync();
        // Fold the warp_num partials with shuffles (warp_num is a power of 2).
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            data.x += g.shfl_down(data.x, i);
            data.y += g.shfl_down(data.y, i);
        }
        if ((lane % warp_num) == 0) {
            if (add_gelu) {
                data.x = gelu(data.x);
                data.y = gelu(data.y);
            }
            data_shared[gid * (WARP_SIZE / warp_num) + (lane / warp_num)] = __float22half2_rn(data);
        }
        b.sync();
        if (gid == 0) output_cast[col_index] = data_shared[lane];
    }
}
// Host launcher for the int8-weight tiled GEMM (v2). The hidden dimension is
// partitioned across spare SMs (rounded down to a power of two); when more
// than one partition is used, the per-partition partials in `block_sums` are
// folded by a follow-up block_reduce_kernel launch.
template <typename T>
void launch_input_tiled_gemm_kernel_v2(T* output,
                                       const T* vals,
                                       const int8_t* weight,
                                       const T* bias,
                                       unsigned int hidden_dim,
                                       unsigned int input_size,
                                       unsigned int output_size,
                                       float* scale,
                                       unsigned int groups,
                                       unsigned int merge_count,
                                       T* block_sums,
                                       bool add_gelu,
                                       cudaStream_t stream)
{
    constexpr int kThreadsPerBlock = 1024;
    output_size /= 2;  // kernel works on vector-2 packed columns
    const int column_blocks = (output_size - 1) / WARP_SIZE + 1;
    // Partition count: spare SMs per column tile, floored to a power of two.
    int reduce_factor = (SMs > column_blocks ? SMs / column_blocks : 1);
    const int reduce_log2 = (int)log2(reduce_factor);
    reduce_factor = (int)pow(2.0, (float)reduce_log2);
    const int weight_stride = (output_size * hidden_dim) / reduce_factor;
    const dim3 grid(column_blocks * reduce_factor);
    const dim3 block(kThreadsPerBlock);
    input_tiled_gemm_kernel_v2<<<grid, block, 0, stream>>>(
        output,
        vals,
        weight,
        bias,
        hidden_dim / reduce_factor,
        reduce_factor,
        input_size,
        output_size,
        column_blocks,
        weight_stride,
        scale,
        groups,
        block_sums,
        merge_count,
        ((hidden_dim >> merge_count) * (output_size << 1)) / groups,  // quantization stride
        add_gelu);
    if (reduce_factor > 1) {
        // Fold the per-partition partial sums into the final output.
        const dim3 reduce_grid(((output_size * input_size) - 1) / WARP_SIZE + 1);
        const dim3 reduce_block(reduce_factor * WARP_SIZE);
        block_reduce_kernel<<<reduce_grid, reduce_block, 0, stream>>>(
            output, block_sums, input_size, (output_size), add_gelu);
    }
}
// Host launcher for the full-precision tiled GEMM (v2). Same partitioning
// scheme as the int8 overload: the hidden dimension is split across spare
// SMs (power-of-two partition count); partials in `block_sums` are folded by
// block_reduce_kernel when more than one partition is used.
template <typename T>
void launch_input_tiled_gemm_kernel_v2(T* output,
                                       const T* vals,
                                       const T* weight,
                                       const T* bias,
                                       T* block_sums,
                                       unsigned int hidden_dim,
                                       unsigned int input_size,
                                       unsigned int output_size,
                                       bool add_gelu,
                                       cudaStream_t stream)
{
    constexpr int kThreadsPerBlock = 1024;
    output_size /= 2;  // kernel works on vector-2 packed columns
    const int column_blocks = (output_size - 1) / WARP_SIZE + 1;
    // Partition count: spare SMs per column tile, floored to a power of two.
    int reduce_factor = (SMs > column_blocks ? SMs / column_blocks : 1);
    const int reduce_log2 = (int)log2(reduce_factor);
    reduce_factor = (int)pow(2.0, (float)reduce_log2);
    const int weight_stride = (output_size * hidden_dim) / reduce_factor;
    const dim3 grid(column_blocks * reduce_factor);
    const dim3 block(kThreadsPerBlock);
    input_tiled_gemm_kernel_v2<<<grid, block, 0, stream>>>(output,
                                                           vals,
                                                           weight,
                                                           bias,
                                                           block_sums,
                                                           hidden_dim / reduce_factor,
                                                           reduce_factor,
                                                           input_size,
                                                           output_size,
                                                           column_blocks,
                                                           weight_stride,
                                                           add_gelu);
    if (reduce_factor > 1) {
        // Fold the per-partition partial sums into the final output.
        const dim3 reduce_grid(((output_size * input_size) - 1) / WARP_SIZE + 1);
        const dim3 reduce_block(reduce_factor * WARP_SIZE);
        block_reduce_kernel<<<reduce_grid, reduce_block, 0, stream>>>(
            output, block_sums, input_size, (output_size), add_gelu);
    }
}
// Explicit instantiations of the v2 launchers: full-precision weights for
// fp16 and fp32 activations, plus the int8-weight path (fp16 only).
template void launch_input_tiled_gemm_kernel_v2(__half* output,
                                                const __half* vals,
                                                const __half* weight,
                                                const __half* bias,
                                                __half* block_sums,
                                                unsigned int hidden_dim,
                                                unsigned int input_size,
                                                unsigned int output_size,
                                                bool add_gelu,
                                                cudaStream_t stream);
template void launch_input_tiled_gemm_kernel_v2(float* output,
                                                const float* vals,
                                                const float* weight,
                                                const float* bias,
                                                float* block_sums,
                                                unsigned int hidden_dim,
                                                unsigned int input_size,
                                                unsigned int output_size,
                                                bool add_gelu,
                                                cudaStream_t stream);
template void launch_input_tiled_gemm_kernel_v2(__half* output,
                                                const __half* vals,
                                                const int8_t* weight,
                                                const __half* bias,
                                                unsigned int hidden_dim,
                                                unsigned int input_size,
                                                unsigned int output_size,
                                                float* scale,
                                                unsigned int groups,
                                                unsigned int merge_count,
                                                __half* block_sums,
                                                bool add_gelu,
                                                cudaStream_t stream);
// Tiled GEMM with int8-quantized weights and fp16 activations:
//   output = vals[input_size x hidden_dim] * dequant(weight)[hidden_dim x output_size] (+ bias)
// output_size here is the vectorized (half2) column count. Each block owns a
// WARP_SIZE-wide stripe of output columns; each warp strides the hidden
// dimension loop_unroll rows at a time. qscale holds per-group dequantization
// scales selected via groups/merge_count.
__global__ void input_tiled_gemm_kernel(__half* output,
const __half* vals,
const int8_t* weight,
const __half* bias,
int hidden_dim,
int input_size,
int output_size,
float* qscale,
int groups,
int merge_count = 1)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;      // warp index within the block
int lane = threadIdx.x & 0x1f;   // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
// Weights are fetched two int8 values at a time through an int16 view.
const int16_t* weight_cast = reinterpret_cast<const int16_t*>(weight);
int hidden_half = hidden_dim >> 1;
int merge_hidden = hidden_dim >> merge_count;
int quantization_stride = (merge_hidden * (output_size << 1)) / groups;
// reading all the quantization scale into a small shared buffer
__shared__ __half shared_quantize_scale[MAX_QUANTIZE_GROUPING];
if (threadIdx.x < (groups << merge_count))
shared_quantize_scale[threadIdx.x] = __float2half(qscale[threadIdx.x]);
__syncthreads();
int col_index = blockIdx.x * WARP_SIZE + lane;  // vectorized output column
for (int j = 0; j < input_size; j += (INPUT_TILE1)) {
__half2 sum[INPUT_TILE1];
#pragma unroll
for (int t = 0; t < INPUT_TILE1; t++) sum[t] = __float2half2_rn(0.f);
{
// Each warp starts loop_unroll (= 4) rows into the hidden dim per warp id.
int wid = gid << 2;
weight_cast += (wid * output_size + col_index);
while (wid < hidden_dim) {
// updating the quantization scale
__half2 qscale_data;
{
auto tmp = shared_quantize_scale[0];
qscale_data = __halves2half2(tmp, tmp);
if (groups > 1) {
unsigned index;
unsigned merge_index = wid / merge_hidden;
index =
(wid - merge_index * merge_hidden) + (col_index << 1) * merge_hidden;
qscale_data = __halves2half2(
shared_quantize_scale[((index / quantization_stride) << merge_count) +
merge_index],
shared_quantize_scale[(((index + merge_hidden) / quantization_stride)
<< merge_count) +
merge_index]);
}
}
// Broadcast each of the 4 activation scalars into both halves of a half2
// so one half2 multiply covers two output columns.
__half2 vals_f[INPUT_TILE1 * loop_unroll];
#pragma unroll
for (int t = 0; t < INPUT_TILE1; t++) {
__half2 val_h[loop_unroll >> 1];
val_h[0] = vals_cast[(j + t) * hidden_half + (wid >> 1)];
val_h[1] = vals_cast[(j + t) * hidden_half + (wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
if (col_index < output_size) {
int16_t weight_q[loop_unroll];
#pragma unroll
for (int k = 0; k < loop_unroll; k++)
weight_q[k] = weight_cast[k * output_size];
#pragma unroll
for (int t = 0; t < INPUT_TILE1; t++) {
#pragma unroll
for (int li = 0; li < loop_unroll; li++) {
// Dequantize the int8 pair and multiply-accumulate.
float2 weight_f;
int8_t* weight_8 = reinterpret_cast<int8_t*>(&weight_q[li]);
weight_f.x = (float)weight_8[0];
weight_f.y = (float)weight_8[1];
auto mul =
__float22half2_rn(weight_f) * qscale_data * vals_f[(t << 2) + li];
if (ACC_HALF)
sum[t] += mul;
else {
// Accumulate in fp32 for accuracy when half accumulation is off.
float2 mul_f = __half22float2(mul);
float2 sum_f = __half22float2(sum[t]);
sum_f.x += mul_f.x;
sum_f.y += mul_f.y;
sum[t] = __float22half2_rn(sum_f);
}
}
}
}
wid += (warp_num << loop_unroll_bits);
weight_cast += (output_size * (warp_num << loop_unroll_bits));
}
}
{
// Cross-warp reduction: transpose partials through shared memory, then
// reduce each column with warp shuffles.
const __half2* bias_cast;
if (bias) bias_cast = reinterpret_cast<const __half2*>(bias);
__shared__ __half2 partial_result[MAX_WARP_NUM * (WARP_SIZE + 1)];
for (int t = 0; t < INPUT_TILE1; t++) {
partial_result[gid * (WARP_SIZE + 1) + lane] = sum[t];
__syncthreads();
sum[t] = partial_result[lane * (WARP_SIZE + 1) + gid];
if (ACC_HALF) {
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
float* sum_f = reinterpret_cast<float*>(&sum[t]);
float temp = g.shfl_xor(*sum_f, i);
__half2* sum_h = reinterpret_cast<__half2*>(&temp);
sum[t] += *sum_h;
}
} else {
float2 sum_f = __half22float2(sum[t]);
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_f.x += g.shfl_xor(sum_f.x, i);
sum_f.y += g.shfl_xor(sum_f.y, i);
}
sum[t] = __float22half2_rn(sum_f);
}
if (lane == 0) { partial_result[gid] = sum[t]; }
__syncthreads();
// Warp t keeps the final result of input row (j + t) in sum[0].
if (gid == t) sum[0] = partial_result[lane];
}
if (gid < (INPUT_TILE1) && (gid + j) < input_size && col_index < output_size) {
if (bias) {
float2 bias_f = __half22float2(bias_cast[col_index]);
float2 sum_f = __half22float2(sum[0]);
sum_f.x += bias_f.x;
sum_f.y += bias_f.y;
sum[0] = __float22half2_rn(sum_f);
}
output_cast[col_index + (j + gid) * output_size] = sum[0];
}
}
// Rewind the weight pointer for the next input tile.
weight_cast = reinterpret_cast<const int16_t*>(weight);
}
}
// Tiled fp16 GEMM:
//   output = vals[input_size x hidden_dim] * weight[hidden_dim x output_size] (+ bias)
// output_size is the vectorized (half2) column count. Requires SM70+; the
// __CUDA_ARCH__ guard compiles the body away on older architectures.
__global__ void input_tiled_gemm_kernel(__half* output,
const __half* vals,
const __half* weight,
const __half* bias,
int hidden_dim,
int input_size,
int output_size)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;      // warp index within the block
int lane = threadIdx.x & 0x1f;   // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
unsigned int col_index = blockIdx.x * WARP_SIZE + lane;  // output column
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += (INPUT_TILE1)) {
__half2 sum[INPUT_TILE1];
#pragma unroll
for (int t = 0; t < (INPUT_TILE1); t++) { sum[t] = __float2half2_rn(0.f); }
{
// Each warp strides the hidden dimension loop_unroll rows at a time.
int wid = gid << loop_unroll_bits;
weight_cast += (wid * output_size + col_index);
while (wid < hidden_dim) {
// Broadcast each activation scalar into both halves of a half2 so one
// multiply covers two output columns.
__half2 vals_f[loop_unroll * (INPUT_TILE1)];
{
for (int t = 0; t < (INPUT_TILE1); t++) {
if ((t + j) < input_size) {
__half2 val_h[2];
val_h[0] = vals_cast[(j + t) * hidden_half + (wid >> 1)];
val_h[1] = vals_cast[(j + t) * hidden_half + (wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
}
}
if (col_index < output_size) {
__half2 weight_h[loop_unroll];
#pragma unroll
for (int k = 0; k < loop_unroll; k++) {
// Zero-fill past the end of the hidden dim so the tail contributes 0.
if ((k + wid) < hidden_dim)
weight_h[k] = weight_cast[k * output_size];
else
weight_h[k] = __float2half2_rn(0.f);
}
#pragma unroll
for (int k = 0; k < (loop_unroll >> inner_loop_unroll_bits); k++) {
#pragma unroll
for (int t = 0; t < (INPUT_TILE1); t++) {
if ((t + j) < input_size) {
#pragma unroll
for (int li = 0; li < inner_loop_unroll; li++) {
// weight_h[0] is reused as the product register here.
weight_h[0] = (vals_f[(t << 2) + li] * weight_h[li]);
if (ACC_HALF)
sum[t] += weight_h[0];
else {
// Accumulate in fp32 for accuracy when half accumulation is off.
float2 weight_f = __half22float2(weight_h[0]);
float2 sum_f = __half22float2(sum[t]);
sum_f.x += weight_f.x;
sum_f.y += weight_f.y;
sum[t] = __float22half2_rn(sum_f);
}
}
}
}
}
}
wid += (warp_num << loop_unroll_bits);
weight_cast += (output_size * (warp_num << loop_unroll_bits));
}
}
{
// Cross-warp reduction: transpose partials through shared memory, then
// reduce with warp shuffles; warp 0 writes the final row.
const __half2* bias_cast;
if (bias) bias_cast = reinterpret_cast<const __half2*>(bias);
__shared__ __half2 partial_result[MAX_WARP_NUM * (WARP_SIZE + 1)];
for (int t = 0; t < (INPUT_TILE1); t++) {
if ((t + j) < input_size) {
__half2 sum_g = sum[t];
partial_result[gid * (WARP_SIZE + 1) + lane] = sum[t];
b.sync();
float2 sum_f;
sum_f = __half22float2(partial_result[lane * (WARP_SIZE + 1) + gid]);
b.sync();
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_f.x += g.shfl_xor(sum_f.x, i);
sum_f.y += g.shfl_xor(sum_f.y, i);
}
if (lane == 0) { partial_result[gid] = __float22half2_rn(sum_f); }
b.sync();
if (gid == 0) {
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) {
sum_g = partial_result[lane];
if (bias) {
float2 bias_f = __half22float2(bias_cast[col]);
sum_f = __half22float2(sum_g);
sum_f.x += bias_f.x;
sum_f.y += bias_f.y;
sum_g = __float22half2_rn(sum_f);
}
output_cast[col + (j + t) * output_size] = (sum_g);
}
}
}
}
}
// Rewind the weight pointer for the next input tile.
weight_cast = reinterpret_cast<const __half2*>(weight);
}
#endif
}
// Tiled fp32 GEMM:
//   output = vals[input_size x hidden_dim] * weight[hidden_dim x output_size] (+ bias)
// output_size is the vectorized (float2) column count. Each warp handles two
// hidden rows at a time; each block strides output columns grid-wide.
__global__ void input_tiled_gemm_kernel(float* output,
const float* vals,
const float* weight,
const float* bias,
int hidden_dim,
int input_size,
int output_size)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;      // warp index within the block
int lane = threadIdx.x & 0x1f;   // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
float2* output_cast = reinterpret_cast<float2*>(output);
const float2* vals_cast = reinterpret_cast<const float2*>(vals);
const float2* weight_cast = reinterpret_cast<const float2*>(weight);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += (INPUT_TILE1)) {
float2 sum[INPUT_TILE1];
#pragma unroll
for (int t = 0; t < (INPUT_TILE1); t++) {
sum[t].x = 0;
sum[t].y = 0;
}
{
// Each warp owns hidden rows wid, wid+1 and walks them in strides of
// 2 * warp_num rows.
int wid = gid << 1;
int offset = wid * output_size;
while (wid < hidden_dim) {
float2 val_data[INPUT_TILE1];
{
for (int t = 0; t < INPUT_TILE1; t++) {
if ((t + j) < input_size) {
val_data[t] = vals_cast[(j + t) * hidden_half + (wid >> 1)];
}
}
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
float2 weight[2];
weight[0] = weight_cast[offset1];
weight[1] = weight_cast[output_size + offset1];
for (int t = 0; t < INPUT_TILE1; t++) {
if ((t + j) < input_size) {
// Rank-2 update: two hidden rows into two output columns.
float2 mul[2];
mul[0].x = val_data[t].x * weight[0].x;
mul[0].y = val_data[t].x * weight[0].y;
mul[1].x = val_data[t].y * weight[1].x;
mul[1].y = val_data[t].y * weight[1].y;
sum[t].x += mul[0].x + mul[1].x;
sum[t].y += mul[0].y + mul[1].y;
}
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 2;
offset += (output_size * warp_num * 2);
}
}
{
// Cross-warp reduction: transpose partials through shared memory, then
// reduce with warp shuffles; warp 0 writes the final row.
const float2* bias_cast;
if (bias) bias_cast = reinterpret_cast<const float2*>(bias);
__shared__ float2 partial_result[MAX_WARP_NUM * (WARP_SIZE + 1)];
for (int t = 0; t < (INPUT_TILE1); t++) {
if ((t + j) < input_size) {
float2 sum_g = sum[t];
partial_result[gid * (WARP_SIZE + 1) + lane] = sum_g;
__syncthreads();
sum_g = partial_result[lane * (WARP_SIZE + 1) + gid];
__syncthreads();
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_g.x += g.shfl_xor(sum_g.x, i);
sum_g.y += g.shfl_xor(sum_g.y, i);
}
if (lane == 0) { partial_result[gid] = sum_g; }
__syncthreads();
if (gid == 0) {
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) {
sum_g = partial_result[lane];
if (bias) {
float2 bias_f = bias_cast[col];
sum_g.x += bias_f.x;
sum_g.y += bias_f.y;
}
output_cast[col + (j + t) * output_size] = sum_g;
}
}
}
}
}
}
}
// Launches the input-tiled GEMM kernel on the given stream.
// The kernel consumes output columns in 2-wide vector packs (half2/float2),
// so the column count is halved before sizing the grid: one block per
// WARP_SIZE-wide group of vectorized columns, 1024 threads per block.
template <typename T>
void launch_input_tiled_gemm_kernel(T* output,
                                    const T* vals,
                                    const T* weight,
                                    const T* bias,
                                    int hidden_dim,
                                    int input_size,
                                    int output_size,
                                    cudaStream_t stream)
{
    output_size /= 2;  // vectorized (2-element) output columns
    constexpr int kThreadsPerBlock = 1024;
    const int numBlocks = (output_size - 1) / WARP_SIZE + 1;
    dim3 grid(numBlocks);
    dim3 block(kThreadsPerBlock);
    input_tiled_gemm_kernel<<<grid, block, 0, stream>>>(
        output, vals, weight, bias, hidden_dim, input_size, output_size);
}
// Explicit instantiations of launch_input_tiled_gemm_kernel for fp32 and
// fp16 data types.
template void launch_input_tiled_gemm_kernel(float* output,
const float* vals,
const float* weight,
const float* bias,
int hidden_dim,
int input_size,
int output_size,
cudaStream_t stream);
template void launch_input_tiled_gemm_kernel(__half* output,
const __half* vals,
const __half* weight,
const __half* bias,
int hidden_dim,
int input_size,
int output_size,
cudaStream_t stream);
// Launches the int8-quantized-weight tiled GEMM kernel on the given stream.
// scale/groups/merge_count parameterize per-group dequantization inside the
// kernel. Output columns are consumed as 2-wide vector packs, so the column
// count is halved before sizing the grid (one block per WARP_SIZE-wide group
// of vectorized columns, 1024 threads per block).
template <typename T>
void launch_input_tiled_gemm_kernel(T* output,
                                    const T* vals,
                                    const int8_t* weight,
                                    const T* bias,
                                    int hidden_dim,
                                    int input_size,
                                    int output_size,
                                    float* scale,
                                    int groups,
                                    int merge_count,
                                    cudaStream_t stream)
{
    output_size /= 2;  // vectorized (2-element) output columns
    constexpr int kThreadsPerBlock = 1024;
    const int numBlocks = (output_size - 1) / WARP_SIZE + 1;
    dim3 grid(numBlocks);
    dim3 block(kThreadsPerBlock);
    input_tiled_gemm_kernel<<<grid, block, 0, stream>>>(output,
                                                        vals,
                                                        weight,
                                                        bias,
                                                        hidden_dim,
                                                        input_size,
                                                        output_size,
                                                        scale,
                                                        groups,
                                                        merge_count);
}
// Explicit instantiation of the int8-weight launcher for fp16 activations.
template void launch_input_tiled_gemm_kernel(__half* output,
const __half* vals,
const int8_t* weight,
const __half* bias,
int hidden_dim,
int input_size,
int output_size,
float* scale,
int groups,
int merge_count,
cudaStream_t stream);
// Tiled fp16 GEMM that applies GeLU to the activations before the multiply:
//   output = gelu(vals) * weight
// output_size is the vectorized (half2) column count. Requires SM70+.
// NOTE(review): `bias` is accepted but never read in this kernel — confirm
// bias is applied elsewhere or intentionally unused.
// NOTE(review): the tile loop advances j by input_tile but each iteration
// only reads activation row j (vals_cast[j * hidden_half + ...]) — verify
// this is intended for input_size > INPUT_TILE.
// Shared buffer capacity assumes hidden_half <= 9000 — TODO confirm callers
// respect this bound.
__global__ void tiled_gemm_kernel_gelu(__half* output,
const __half* vals,
const __half* weight,
const __half* bias,
int hidden_dim,
int input_size,
int output_size)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;      // warp index within the block
int lane = threadIdx.x & 0x1f;   // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
int id = threadIdx.x;
__half2 inp_reg[8];
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
int input_tile = (input_size < INPUT_TILE ? input_size : INPUT_TILE);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += input_tile) {
__shared__ __half2 input_shared[9000];
{
// Stage gelu(activations) into shared memory, block-strided.
int k = 0;
int input_id = id;
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[j * hidden_half + input_id];
float2 inp_f = __half22float2(inp_reg[k]);
inp_f.x = gelu(inp_f.x);
inp_f.y = gelu(inp_f.y);
inp_reg[k] = __float22half2_rn(inp_f);
input_shared[input_id] = inp_reg[k++];
input_id += blockDim.x;
}
b.sync();
}
int wid = gid << 2;  // each warp starts 4 hidden rows in per warp id
int offset = wid * output_size;
float2 sum;
sum.x = 0;
sum.y = 0;
while (wid < hidden_dim) {
// Broadcast the 4 staged activation scalars into half2 registers.
__half2 vals_f[4];
{
__half2 val_h[2];
val_h[0] = input_shared[(wid >> 1)];
val_h[1] = input_shared[(wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[0] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
__half2 weight_h[4];
weight_h[0] = weight_cast[offset1];
weight_h[1] = weight_cast[output_size + offset1];
weight_h[2] = weight_cast[(output_size << 1) + offset1];
weight_h[3] = weight_cast[((output_size << 1) + output_size) + offset1];
{
// Multiply in half2, accumulate in fp32.
float2 mul[4];
mul[0] = __half22float2(vals_f[0] * weight_h[0]);
mul[1] = __half22float2(vals_f[1] * weight_h[1]);
mul[2] = __half22float2(vals_f[2] * weight_h[2]);
mul[3] = __half22float2(vals_f[3] * weight_h[3]);
sum.x += mul[0].x + mul[1].x + mul[2].x + mul[3].x;
sum.y += mul[0].y + mul[1].y + mul[2].y + mul[3].y;
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 4;
offset += (output_size * warp_num * 4);
}
// Cross-warp reduction via shared-memory transpose + warp shuffles.
__shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
{
partial_result[gid][lane] = sum;
__syncthreads();
sum = partial_result[lane][gid];
__syncthreads();
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum.x += g.shfl_xor(sum.x, i);
sum.y += g.shfl_xor(sum.y, i);
}
if (lane == 0) { partial_result[0][gid] = sum; }
__syncthreads();
if (gid == 0) {
sum = partial_result[gid][lane];
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) { output_cast[j * output_size + col] = __float22half2_rn(sum); }
}
}
#endif
}
// Tiled fp32 GEMM that applies GeLU to the activations before the multiply:
//   output = gelu(vals) * weight
// output_size is the vectorized (float2) column count.
// NOTE(review): `bias` is accepted but never read in this kernel — confirm
// bias is applied elsewhere or intentionally unused.
// NOTE(review): the tile loop advances j by input_tile but each iteration
// only reads activation row j — verify for input_size > INPUT_TILE.
// Shared buffer capacity assumes hidden_half <= 5000 — TODO confirm.
__global__ void tiled_gemm_kernel_gelu(float* output,
const float* vals,
const float* weight,
const float* bias,
int hidden_dim,
int input_size,
int output_size)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;      // warp index within the block
int lane = threadIdx.x & 0x1f;   // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
int id = threadIdx.x;
float2 inp_reg[8];
float2* output_cast = reinterpret_cast<float2*>(output);
const float2* vals_cast = reinterpret_cast<const float2*>(vals);
const float2* weight_cast = reinterpret_cast<const float2*>(weight);
int input_tile = (input_size < INPUT_TILE ? input_size : INPUT_TILE);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += input_tile) {
__shared__ float2 input_shared[5000];
{
// Stage gelu(activations) into shared memory, block-strided.
int k = 0;
int input_id = id;
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[j * hidden_half + input_id];
inp_reg[k].x = gelu(inp_reg[k].x);
inp_reg[k].y = gelu(inp_reg[k].y);
input_shared[input_id] = inp_reg[k++];
input_id += blockDim.x;
}
b.sync();
}
int wid = gid << 1;  // each warp owns 2 hidden rows per stride
int offset = wid * output_size;
float2 sum;
sum.x = 0;
sum.y = 0;
while (wid < hidden_dim) {
float2 val_data;
{
val_data = input_shared[wid >> 1];
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
float2 weight[2];
weight[0] = weight_cast[offset1];
weight[1] = weight_cast[output_size + offset1];
{
// Rank-2 update: two hidden rows into two output columns.
float2 mul[4];
mul[0].x = val_data.x * weight[0].x;
mul[0].y = val_data.x * weight[0].y;
mul[1].x = val_data.y * weight[1].x;
mul[1].y = val_data.y * weight[1].y;
sum.x += mul[0].x + mul[1].x;
sum.y += mul[0].y + mul[1].y;
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 2;
offset += (output_size * warp_num * 2);
}
// Cross-warp reduction via shared-memory transpose + warp shuffles.
__shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
{
partial_result[gid][lane] = sum;
__syncthreads();
sum = partial_result[lane][gid];
__syncthreads();
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum.x += g.shfl_xor(sum.x, i);
sum.y += g.shfl_xor(sum.y, i);
}
if (lane == 0) { partial_result[0][gid] = sum; }
__syncthreads();
if (gid == 0) {
sum = partial_result[gid][lane];
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) { output_cast[(j)*output_size + col] = sum; }
}
}
}
// Launches the GeLU-fused tiled GEMM kernel (tiled_gemm_kernel_gelu above)
// on the given stream. Output columns are 2-wide vector packs, so the column
// count is halved before sizing the grid.
//
// Fix: this launcher previously dispatched input_tiled_gemm_kernel, so the
// GeLU-fused kernels defined directly above (with this exact signature) were
// never invoked and no GeLU was applied on this path.
template <typename T>
void launch_tiled_gemm_kernel_gelu(T* output,
                                   const T* vals,
                                   const T* weight,
                                   const T* bias,
                                   int hidden_dim,
                                   int input_size,
                                   int output_size,
                                   cudaStream_t stream)
{
    constexpr int threads = 1024;
    output_size /= 2;  // vectorized (2-element) output columns
    dim3 grid_dim((output_size - 1) / WARP_SIZE + 1);
    dim3 block_dim(threads);
    tiled_gemm_kernel_gelu<<<grid_dim, block_dim, 0, stream>>>(
        output, vals, weight, bias, hidden_dim, input_size, output_size);
}
// Explicit instantiations of launch_tiled_gemm_kernel_gelu for fp32 and
// fp16 data types.
template void launch_tiled_gemm_kernel_gelu(float* output,
const float* vals,
const float* weight,
const float* bias,
int hidden_dim,
int input_size,
int output_size,
cudaStream_t stream);
template void launch_tiled_gemm_kernel_gelu(__half* output,
const __half* vals,
const __half* weight,
const __half* bias,
int hidden_dim,
int input_size,
int output_size,
cudaStream_t stream);
// Fused layer-norm + int8-weight GEMM (fp16 activations):
//   output = layernorm(vals; gamma, beta, epsilon) * dequant(weight) + bias
// output_size is the vectorized (half2) column count. Requires SM70+.
// The layernorm result for each tile row is staged in shared memory and then
// consumed by the tiled GEMM loop. Shared buffer capacity assumes
// INPUT_TILE * hidden_half <= 9000 — TODO confirm callers respect this.
__global__ void input_tiled_gemm_kernel(__half* output,
const __half* vals,
const int8_t* weight,
const __half* bias,
const __half* gamma,
const __half* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size,
float* qscale,
int groups)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;      // warp index within the block
int lane = threadIdx.x & 0x1f;   // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
int id = threadIdx.x;
int quantization_stride = (hidden_dim * (output_size << 1)) / groups;
int col_index = blockIdx.x * WARP_SIZE + lane;  // vectorized output column
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
// Weights are fetched two int8 values at a time through an int16 view.
const int16_t* weight_cast = reinterpret_cast<const int16_t*>(weight);
// used for quantization scaling factor
__shared__ __half shared_quantize_scale[MAX_QUANTIZE_GROUPING];
// reading all the quantization scale into a small shared buffer
if (threadIdx.x < groups)
shared_quantize_scale[threadIdx.x] = __float2half(qscale[threadIdx.x]);
__syncthreads();
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += INPUT_TILE) {
__shared__ __half2 input_shared[9000];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
// --- layernorm of row (j + t) ---
__half2 inp_reg[8];
int k = 0;
int input_id = id;
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
input_shared[input_id + t * hidden_half] = inp_reg[k++];
input_id += blockDim.x;
}
b.sync();
// Block-wide mean via warp shuffles + shared scratch.
float sum = 0;
for (int f = k - 1; f >= 0; f--) {
float2 inp_f = __half22float2(inp_reg[f]);
sum += inp_f.x + inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
__shared__ float shr[MAX_WARP_NUM];
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
float mean = sum / hidden_dim;
sum = 0.f;
// Center the values and accumulate the variance.
for (int f = 0; f < k; f++) {
float2 inp_f = __half22float2(inp_reg[f]);
inp_f.x -= mean;
inp_f.y -= mean;
inp_reg[f] = __float22half2_rn(inp_f);
sum += inp_f.x * inp_f.x;
sum += inp_f.y * inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
sum /= hidden_dim;
sum += epsilon;
__half2 variance_h = __float2half2_rn(sum);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
// Normalize, scale/shift, and publish to shared memory for the GEMM.
for (int f = 0; f < k; f++) {
int id = f * blockDim.x + threadIdx.x;
inp_reg[f] = inp_reg[f] * h2rsqrt(variance_h);
inp_reg[f] = inp_reg[f] * gamma_cast[id] + beta_cast[id];
input_shared[id + t * hidden_half] = inp_reg[f];
}
b.sync();
}
}
{
// --- tiled GEMM over the normalized tile ---
int wid = gid << 2;
weight_cast += (wid * output_size + col_index);
__half2 sum[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) { sum[t] = __float2half2_rn(0.f); }
while (wid < hidden_dim) {
// updating the quantization scale
__half2 qscale_data;
{
auto tmp = shared_quantize_scale[0];
qscale_data = __halves2half2(tmp, tmp);
if (groups > 1) {
unsigned index;
index = wid + (col_index << 1) * hidden_dim;
qscale_data = __halves2half2(
shared_quantize_scale[index / quantization_stride],
shared_quantize_scale[(index + hidden_dim) / quantization_stride]);
}
}
// Broadcast the 4 staged activation scalars into half2 registers.
__half2 vals_f[4 * INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
__half2 val_h[2];
val_h[0] = input_shared[t * hidden_half + (wid >> 1)];
val_h[1] = input_shared[t * hidden_half + (wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
if (col_index < output_size) {
int16_t weight_q[loop_unroll];
#pragma unroll
for (int k = 0; k < loop_unroll; k++)
weight_q[k] = weight_cast[k * output_size];
#pragma unroll
for (int t = 0; t < INPUT_TILE; t++) {
#pragma unroll
for (int li = 0; li < loop_unroll; li++) {
// Dequantize the int8 pair and multiply-accumulate.
float2 weight_f;
int8_t* weight_8 = reinterpret_cast<int8_t*>(&weight_q[li]);
weight_f.x = (float)weight_8[0];
weight_f.y = (float)weight_8[1];
auto mul =
__float22half2_rn(weight_f) * qscale_data * vals_f[(t << 2) + li];
if (ACC_HALF)
sum[t] += mul;
else {
float2 mul_f = __half22float2(mul);
float2 sum_f = __half22float2(sum[t]);
sum_f.x += mul_f.x;
sum_f.y += mul_f.y;
sum[t] = __float22half2_rn(sum_f);
}
}
}
}
wid += (warp_num << loop_unroll_bits);
weight_cast += (output_size * (warp_num << loop_unroll_bits));
}
// Cross-warp reduction via shared-memory transpose + warp shuffles.
__shared__ __half2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
const __half2* bias_cast = reinterpret_cast<const __half2*>(bias);
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
partial_result[gid][lane] = sum[t];
__syncthreads();
sum[t] = partial_result[lane][gid];
if (ACC_HALF) {
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
float* sum_f = reinterpret_cast<float*>(&sum[t]);
float temp = g.shfl_xor(*sum_f, i);
__half2* sum_h = reinterpret_cast<__half2*>(&temp);
sum[t] += *sum_h;
}
} else {
float2 sum_g = __half22float2(sum[t]);
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_g.x += g.shfl_xor(sum_g.x, i);
sum_g.y += g.shfl_xor(sum_g.y, i);
}
sum[t] = __float22half2_rn(sum_g);
}
// NOTE(review): this stores sum[0] although the value just reduced for
// this iteration is sum[t]; for INPUT_TILE > 1 every tile row would
// receive row 0's result. Harmless if INPUT_TILE == 1 — verify. Also no
// barrier between the transpose read above and this write to row 0.
if (lane == 0) { partial_result[0][gid] = sum[0]; }
__syncthreads();
if (gid == 0) {
sum[0] = partial_result[0][lane];
if (col_index < output_size) {
float2 bias_f = __half22float2(bias_cast[col_index]);
float2 sum_g = __half22float2(sum[0]);
sum_g.x += bias_f.x;
sum_g.y += bias_f.y;
output_cast[(j + t) * output_size + col_index] =
__float22half2_rn(sum_g);
}
}
}
}
}
// Rewind the weight pointer for the next input tile.
weight_cast = reinterpret_cast<const int16_t*>(weight);
}
#endif
}
// Fused layer-norm + fp16 GEMM:
//   output = layernorm(vals; gamma, beta, epsilon) * weight + bias
// output_size is the vectorized (half2) column count. Requires SM70+.
// The layernorm result for each tile row is staged in shared memory and then
// consumed by the tiled GEMM loop. Shared buffer capacity assumes
// INPUT_TILE * hidden_half <= 9000 — TODO confirm callers respect this.
__global__ void input_tiled_gemm_kernel(__half* output,
const __half* vals,
const __half* weight,
const __half* bias,
const __half* gamma,
const __half* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;      // warp index within the block
int lane = threadIdx.x & 0x1f;   // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
int id = threadIdx.x;
__half2* output_cast = reinterpret_cast<__half2*>(output);
const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += INPUT_TILE) {
__shared__ __half2 input_shared[9000];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
// --- layernorm of row (j + t) ---
__half2 inp_reg[8];
int k = 0;
int input_id = id; //(gid + warp_num * lane);
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
input_shared[input_id + t * hidden_half] = inp_reg[k++];
input_id += blockDim.x;
}
// NOTE(review): the barrier after staging is commented out here (the
// sibling int8 variant keeps it) — verify no cross-warp read of
// input_shared can happen before this loop completes.
// b.sync();
// Block-wide mean via warp shuffles + shared scratch.
float sum = 0;
for (int f = k - 1; f >= 0; f--) {
float2 inp_f = __half22float2(inp_reg[f]);
sum += inp_f.x + inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
__shared__ float shr[MAX_WARP_NUM];
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
float mean = sum / hidden_dim;
sum = 0.f;
// Center the values and accumulate the variance.
for (int f = 0; f < k; f++) {
float2 inp_f = __half22float2(inp_reg[f]);
inp_f.x -= mean;
inp_f.y -= mean;
inp_reg[f] = __float22half2_rn(inp_f);
sum += inp_f.x * inp_f.x;
sum += inp_f.y * inp_f.y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
sum /= hidden_dim;
sum += epsilon;
sum = __frsqrt_rn(sum);
__half2 variance_h = __float2half2_rn(sum);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
// Normalize, scale/shift, and publish to shared memory for the GEMM.
for (int f = 0; f < k; f++) {
int id = f * blockDim.x + threadIdx.x;
inp_reg[f] = inp_reg[f] * variance_h;
inp_reg[f] = inp_reg[f] * gamma_cast[id] + beta_cast[id];
input_shared[id + t * hidden_half] = inp_reg[f];
}
b.sync();
}
}
{
// --- tiled GEMM over the normalized tile ---
int wid = gid << 2;
int offset = wid * output_size;
float2 sum[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
sum[t].x = 0;
sum[t].y = 0;
}
while (wid < hidden_dim) {
// Broadcast the 4 staged activation scalars into half2 registers.
__half2 vals_f[4 * INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
__half2 val_h[2];
val_h[0] = input_shared[t * hidden_half + (wid >> 1)];
val_h[1] = input_shared[t * hidden_half + (wid >> 1) + 1];
__half* inp_data[2];
inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
}
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
__half2 weight_h[4];
weight_h[0] = weight_cast[offset1];
weight_h[1] = weight_cast[output_size + offset1];
weight_h[2] = weight_cast[(output_size << 1) + offset1];
weight_h[3] = weight_cast[((output_size << 1) + output_size) + offset1];
#pragma unroll
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
// Multiply in half2, accumulate in fp32.
float2 mul[4];
mul[0] = __half22float2(vals_f[(t << 2)] * weight_h[0]);
mul[1] = __half22float2(vals_f[(t << 2) + 1] * weight_h[1]);
mul[2] = __half22float2(vals_f[(t << 2) + 2] * weight_h[2]);
mul[3] = __half22float2(vals_f[(t << 2) + 3] * weight_h[3]);
sum[t].x += mul[0].x + mul[1].x + mul[2].x + mul[3].x;
sum[t].y += mul[0].y + mul[1].y + mul[2].y + mul[3].y;
}
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 4;
offset += (output_size * warp_num * 4);
}
// Cross-warp reduction via shared-memory transpose + warp shuffles.
__shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
const __half2* bias_cast = reinterpret_cast<const __half2*>(bias);
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 sum_g = sum[t];
partial_result[gid][lane] = sum_g;
__syncthreads();
sum_g = partial_result[lane][gid];
//__syncthreads();
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_g.x += g.shfl_xor(sum_g.x, i);
sum_g.y += g.shfl_xor(sum_g.y, i);
}
if (lane == 0) { partial_result[0][gid] = sum_g; }
__syncthreads();
if (gid == 0) {
sum_g = partial_result[0][lane];
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) {
float2 bias_f = __half22float2(bias_cast[col]);
sum_g.x += bias_f.x;
sum_g.y += bias_f.y;
output_cast[(j + t) * output_size + col] = __float22half2_rn(sum_g);
}
}
}
}
}
}
#endif
}
__global__ void input_tiled_gemm_kernel(float* output,
const float* vals,
const float* weight,
const float* bias,
const float* gamma,
const float* beta,
const float epsilon,
int hidden_dim,
int input_size,
int output_size)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
float2* output_cast = reinterpret_cast<float2*>(output);
const float2* vals_cast = reinterpret_cast<const float2*>(vals);
const float2* weight_cast = reinterpret_cast<const float2*>(weight);
int hidden_half = hidden_dim >> 1;
for (int j = 0; j < input_size; j += INPUT_TILE) {
__shared__ float2 input_shared[5000];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 inp_reg[8];
int k = 0; // Check if k goes from 0 to 7
int input_id = id; //(gid + warp_num * lane);
while (input_id < hidden_half) {
inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
input_shared[input_id + t * hidden_half] = inp_reg[k++];
input_id += blockDim.x;
}
b.sync();
float sum = 0;
for (int f = k - 1; f >= 0; f--) { sum += inp_reg[f].x + inp_reg[f].y; }
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
__shared__ float shr[MAX_WARP_NUM];
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
float mean = sum / hidden_dim;
sum = 0.f;
for (int f = 0; f < k; f++) {
inp_reg[f].x -= mean;
inp_reg[f].y -= mean;
sum += inp_reg[f].x * inp_reg[f].x;
sum += inp_reg[f].y * inp_reg[f].y;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
sum = g.shfl(sum, 0);
sum /= hidden_dim;
sum += epsilon;
sum = __frsqrt_rn(sum);
const float2* gamma_cast = reinterpret_cast<const float2*>(gamma);
const float2* beta_cast = reinterpret_cast<const float2*>(beta);
for (int f = 0; f < k; f++) {
int id = f * blockDim.x + threadIdx.x;
inp_reg[f].x = inp_reg[f].x * sum;
inp_reg[f].y = inp_reg[f].y * sum;
inp_reg[f].x = inp_reg[f].x * gamma_cast[id].x + beta_cast[id].x;
inp_reg[f].y = inp_reg[f].y * gamma_cast[id].y + beta_cast[id].y;
input_shared[id + t * hidden_half] = inp_reg[f];
}
b.sync();
}
}
{
int wid = gid << 1;
int offset = wid * output_size;
float2 sum[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
sum[t].x = 0;
sum[t].y = 0;
}
while (wid < hidden_dim) {
float2 val_data[INPUT_TILE];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
val_data[t] = input_shared[t * hidden_half + (wid >> 1)];
}
}
int row = blockIdx.x * WARP_SIZE + lane;
auto offset1 = offset + row;
while (row < output_size) {
float2 weight[2];
weight[0] = weight_cast[offset1];
weight[1] = weight_cast[output_size + offset1];
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 mul[2];
mul[0].x = val_data[t].x * weight[0].x;
mul[0].y = val_data[t].x * weight[0].y;
mul[1].x = val_data[t].y * weight[1].x;
mul[1].y = val_data[t].y * weight[1].y;
sum[t].x += mul[0].x + mul[1].x;
sum[t].y += mul[0].y + mul[1].y;
}
}
row += (gridDim.x * WARP_SIZE);
offset1 += (gridDim.x * WARP_SIZE);
}
wid += warp_num * 2;
offset += (output_size * warp_num * 2);
}
__shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
const float2* bias_cast = reinterpret_cast<const float2*>(bias);
for (int t = 0; t < INPUT_TILE; t++) {
if ((t + j) < input_size) {
float2 sum_g = sum[t];
partial_result[gid][lane] = sum_g;
__syncthreads();
sum_g = partial_result[lane][gid];
//__syncthreads();
#pragma unroll
for (int i = 1; i < WARP_SIZE; i *= 2) {
sum_g.x += g.shfl_xor(sum_g.x, i);
sum_g.y += g.shfl_xor(sum_g.y, i);
}
if (lane == 0) { partial_result[0][gid] = sum_g; }
__syncthreads();
if (gid == 0) {
sum_g = partial_result[0][lane];
int col = blockIdx.x * WARP_SIZE + lane;
if (col < output_size) {
float2 bias_f = bias_cast[col];
sum_g.x += bias_f.x;
sum_g.y += bias_f.y;
output_cast[(j + t) * output_size + col] = sum_g;
}
}
}
}
}
}
}
// Host-side launcher for the fused layernorm + GEMM kernel (full-precision
// weights). The kernel addresses outputs as 2-wide vector columns
// (float2/__half2), so output_size is halved before choosing the grid:
// one block per WARP_SIZE-wide slice of vector columns, 1024 threads each.
template <typename T>
void launch_input_tiled_gemm_kernel(T* output,
                                    const T* vals,
                                    const T* weight,
                                    const T* bias,
                                    const T* gamma,
                                    const T* beta,
                                    const float epsilon,
                                    int hidden_dim,
                                    int input_size,
                                    int output_size,
                                    cudaStream_t stream)
{
    constexpr int block_threads = 1024;
    const int vec_output = output_size / 2;  // kernel indexes 2-wide columns
    dim3 grid((vec_output - 1) / WARP_SIZE + 1);
    dim3 block(block_threads);
    input_tiled_gemm_kernel<<<grid, block, 0, stream>>>(output,
                                                        vals,
                                                        weight,
                                                        bias,
                                                        gamma,
                                                        beta,
                                                        epsilon,
                                                        hidden_dim,
                                                        input_size,
                                                        vec_output);
}
// Explicit instantiations of the full-precision launcher (fp32 and fp16) so
// callers in other translation units can link against this .cu definition.
template void launch_input_tiled_gemm_kernel(float* output,
                                             const float* vals,
                                             const float* weight,
                                             const float* bias,
                                             const float* gamma,
                                             const float* beta,
                                             const float epsilon,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size,
                                             cudaStream_t stream);
template void launch_input_tiled_gemm_kernel(__half* output,
                                             const __half* vals,
                                             const __half* weight,
                                             const __half* bias,
                                             const __half* gamma,
                                             const __half* beta,
                                             const float epsilon,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size,
                                             cudaStream_t stream);
// Host-side launcher for the fused layernorm + int8-quantized-weight GEMM
// kernel. `scale`/`groups` describe the per-group dequantization factors.
// output_size is halved because the kernel works on 2-wide vector columns;
// the grid covers those columns in WARP_SIZE-wide slices.
template <typename T>
void launch_input_tiled_gemm_kernel(T* output,
                                    const T* vals,
                                    const int8_t* weight,
                                    const T* bias,
                                    const T* gamma,
                                    const T* beta,
                                    const float epsilon,
                                    int hidden_dim,
                                    int input_size,
                                    int output_size,
                                    float* scale,
                                    int groups,
                                    cudaStream_t stream)
{
    constexpr int block_threads = 1024;
    const int vec_output = output_size / 2;  // kernel indexes 2-wide columns
    dim3 grid((vec_output - 1) / WARP_SIZE + 1);
    dim3 block(block_threads);
    input_tiled_gemm_kernel<<<grid, block, 0, stream>>>(output,
                                                        vals,
                                                        weight,
                                                        bias,
                                                        gamma,
                                                        beta,
                                                        epsilon,
                                                        hidden_dim,
                                                        input_size,
                                                        vec_output,
                                                        scale,
                                                        groups);
}
// Explicit instantiation of the int8-weight launcher for fp16 activations
// (the only precision the quantized kernel path supports here).
template void launch_input_tiled_gemm_kernel(__half* output,
                                             const __half* vals,
                                             const int8_t* weight,
                                             const __half* bias,
                                             const __half* gamma,
                                             const __half* beta,
                                             const float epsilon,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size,
                                             float* scale,
                                             int groups,
                                             cudaStream_t stream);
// Fused kernel: (vals + residual + input_bias) -> layernorm(gamma, beta,
// epsilon) -> GEMM against int8-quantized weights (dequantized via per-group
// scales in qscale) -> bias add -> gelu(), written to `output`. The
// pre-layernorm sum is also written to `residual_add`. All __half pointers
// are reinterpreted as __half2, so hidden_dim is assumed even and
// output_size arrives pre-halved from the host launcher — TODO confirm
// against callers. Requires SM70+ (guarded by __CUDA_ARCH__).
// NOTE(review): depends on INPUT_TILE, MAX_WARP_NUM, MAX_QUANTIZE_GROUPING,
// WARP_SIZE, loop_unroll(_bits), ACC_HALF and gelu() defined elsewhere in
// this file; also assumes hidden_half rows fit in the 9000-element shared
// buffer and that at most 8 __half2 elements land in inp_reg per thread.
__global__ void input_tiled_gemm_kernel_gelu(__half* output,
                                             __half* residual_add,
                                             const __half* vals,
                                             const __half* residual,
                                             const __half* input_bias,
                                             const int8_t* weight,
                                             const __half* bias,
                                             const __half* gamma,
                                             const __half* beta,
                                             const float epsilon,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size,
                                             float* qscale,
                                             int groups)
{
#if __CUDA_ARCH__ >= 700
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    __half2* output_cast = reinterpret_cast<__half2*>(output);
    const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
    const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);
    __half2* residual_add_cast = reinterpret_cast<__half2*>(residual_add);
    // int8 weights are fetched two at a time through an int16 view.
    const int16_t* weight_cast = reinterpret_cast<const int16_t*>(weight);
    const __half2* input_bias_cast = reinterpret_cast<const __half2*>(input_bias);
    int hidden_half = hidden_dim >> 1;
    int quantization_stride = (hidden_dim * (output_size << 1)) / groups;
    __shared__ __half shared_quantize_scale[MAX_QUANTIZE_GROUPING];
    // reading all the quantization scale into a small shared buffer
    if (threadIdx.x < groups)
        shared_quantize_scale[threadIdx.x] = __float2half(qscale[threadIdx.x]);
    __syncthreads();
    int col_index = blockIdx.x * WARP_SIZE + lane;  // output vector-column this lane owns
    for (int j = 0; j < input_size; j += INPUT_TILE) {
        __shared__ __half2 input_shared[9000];
        for (int t = 0; t < INPUT_TILE; t++) {
            if ((t + j) < input_size) {
                __half2 inp_reg[8];
                int k = 0;
                int input_id = id;
                // Strided load of row (j + t): add residual and input bias,
                // publish the sum to residual_add and stage it in shared memory.
                while (input_id < hidden_half) {
                    inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
                    float2 inp_f = __half22float2(inp_reg[k]);
                    float2 residual_f =
                        __half22float2(residual_cast[(j + t) * hidden_half + input_id]);
                    float2 bias_f = __half22float2(input_bias_cast[input_id]);
                    inp_f.x += residual_f.x + bias_f.x;
                    inp_f.y += residual_f.y + bias_f.y;
                    inp_reg[k] = __float22half2_rn(inp_f);
                    residual_add_cast[(j + t) * hidden_half + input_id] = inp_reg[k];
                    input_shared[input_id + t * hidden_half] = inp_reg[k++];
                    input_id += blockDim.x;
                }
                b.sync();
                // Block-wide mean: intra-warp shuffle reduce, then cross-warp
                // combine through shr[], then broadcast from lane 0.
                float sum = 0;
                for (int f = k - 1; f >= 0; f--) {
                    float2 inp_f = __half22float2(inp_reg[f]);
                    sum += inp_f.x + inp_f.y;
                }
                for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
                __shared__ float shr[MAX_WARP_NUM];
                if (g.thread_rank() == 0) shr[gid] = sum;
                b.sync();
                if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
                for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
                sum = g.shfl(sum, 0);
                float mean = sum / hidden_dim;
                // Variance of the mean-centered values, reduced the same way.
                sum = 0.f;
                for (int f = 0; f < k; f++) {
                    float2 inp_f = __half22float2(inp_reg[f]);
                    inp_f.x -= mean;
                    inp_f.y -= mean;
                    inp_reg[f] = __float22half2_rn(inp_f);
                    sum += inp_f.x * inp_f.x;
                    sum += inp_f.y * inp_f.y;
                }
                for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
                if (g.thread_rank() == 0) shr[gid] = sum;
                b.sync();
                if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
                for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
                sum = g.shfl(sum, 0);
                sum /= hidden_dim;
                sum += epsilon;
                __half2 variance_h = __float2half2_rn(sum);
                const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
                const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
                // Normalize (x * rsqrt(var + eps)) and apply the affine
                // transform; refresh the shared-memory copy for the GEMM stage.
                for (int f = 0; f < k; f++) {
                    int id = f * blockDim.x + threadIdx.x;
                    inp_reg[f] = inp_reg[f] * h2rsqrt(variance_h);
                    inp_reg[f] = inp_reg[f] * gamma_cast[id] + beta_cast[id];
                    input_shared[id + t * hidden_half] = inp_reg[f];
                }
                b.sync();
            }
        }
        // GEMM stage: each warp sweeps 4 consecutive hidden rows at a time,
        // each lane owns one output vector-column (col_index).
        int wid = gid << 2;
        weight_cast += (wid * output_size + col_index);
        __half2 sum[INPUT_TILE];
        for (int t = 0; t < INPUT_TILE; t++) { sum[t] = __float2half2_rn(0.f); }
        while (wid < hidden_dim) {
            // updating the quantization scale
            __half2 qscale_data;
            {
                auto tmp = shared_quantize_scale[0];
                qscale_data = __halves2half2(tmp, tmp);
                if (groups > 1) {
                    unsigned index;
                    index = wid + (col_index << 1) * hidden_dim;
                    qscale_data = __halves2half2(
                        shared_quantize_scale[((index / quantization_stride))],
                        shared_quantize_scale[((index + hidden_dim) / quantization_stride)]);
                }
            }
            // Broadcast the 4 staged input scalars into both halves of a
            // __half2 so one value multiplies both output lanes of a column.
            __half2 vals_f[INPUT_TILE * 4];
            for (int t = 0; t < INPUT_TILE; t++) {
                __half2 val_h[2];
                val_h[0] = input_shared[t * hidden_half + (wid >> 1)];
                val_h[1] = input_shared[t * hidden_half + (wid >> 1) + 1];
                __half* inp_data[2];
                inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
                inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
                vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
                vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
                vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
                vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
            }
            if (col_index < output_size) {
                int16_t weight_q[loop_unroll];
#pragma unroll
                for (int k = 0; k < loop_unroll; k++) weight_q[k] = weight_cast[k * output_size];
#pragma unroll
                for (int t = 0; t < INPUT_TILE; t++) {
#pragma unroll
                    for (int li = 0; li < loop_unroll; li++) {
                        // Dequantize the int8 pair and accumulate; ACC_HALF
                        // selects half vs float accumulation precision.
                        int8_t* weight_8 = reinterpret_cast<int8_t*>(&weight_q[li]);
                        float2 weight_f;
                        weight_f.x = (float)weight_8[0];
                        weight_f.y = (float)weight_8[1];
                        auto mul =
                            __float22half2_rn(weight_f) * qscale_data * vals_f[(t << 2) + li];
                        if (ACC_HALF)
                            sum[t] += mul;
                        else {
                            float2 mul_f = __half22float2(mul);
                            float2 sum_f = __half22float2(sum[t]);
                            sum_f.x += mul_f.x;
                            sum_f.y += mul_f.y;
                            sum[t] = __float22half2_rn(sum_f);
                        }
                    }
                }
            }
            wid += (warp_num << loop_unroll_bits);
            weight_cast += ((warp_num << loop_unroll_bits) * output_size);
        }
        // Epilogue: transpose partials through shared memory, reduce across
        // warps with xor-shuffles, then warp 0 adds bias, applies gelu and
        // writes the final output column.
        {
            __shared__ __half2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
            const __half2* bias_cast = reinterpret_cast<const __half2*>(bias);
            for (int t = 0; t < INPUT_TILE; t++) {
                if ((t + j) < input_size) {
                    partial_result[gid][lane] = sum[t];
                    __syncthreads();
                    sum[t] = partial_result[lane][gid];
                    if (ACC_HALF) {
#pragma unroll
                        for (int i = 1; i < WARP_SIZE; i *= 2) {
                            float* sum_f = reinterpret_cast<float*>(&sum[t]);
                            float temp = g.shfl_xor(*sum_f, i);
                            __half2* sum_h = reinterpret_cast<__half2*>(&temp);
                            sum[t] += *sum_h;
                        }
                    } else {
                        float2 sum_g = __half22float2(sum[t]);
#pragma unroll
                        for (int i = 1; i < WARP_SIZE; i *= 2) {
                            sum_g.x += g.shfl_xor(sum_g.x, i);
                            sum_g.y += g.shfl_xor(sum_g.y, i);
                        }
                        sum[t] = __float22half2_rn(sum_g);
                    }
                    if (lane == 0) { partial_result[0][gid] = sum[t]; }
                    __syncthreads();
                    if (gid == 0) {
                        if (col_index < output_size) {
                            float2 sum_g = __half22float2(partial_result[0][lane]);
                            float2 bias_f = __half22float2(bias_cast[col_index]);
                            sum_g.x += bias_f.x;
                            sum_g.y += bias_f.y;
                            sum_g.x = gelu(sum_g.x);
                            sum_g.y = gelu(sum_g.y);
                            output_cast[(j + t) * output_size + col_index] =
                                __float22half2_rn(sum_g);
                        }
                    }
                }
            }
        }
        // Rewind the weight pointer for the next input tile.
        weight_cast = reinterpret_cast<const int16_t*>(weight);
    }
#endif
}
// Fused kernel, fp16-weight variant: (vals + residual + input_bias) ->
// layernorm(gamma, beta, epsilon) -> GEMM against __half weights -> bias add
// -> gelu(), written to `output`; the pre-layernorm sum also goes to
// `residual_add`. Pointers are addressed as __half2, so hidden_dim is
// assumed even and output_size arrives pre-halved from the launcher — TODO
// confirm against callers. Requires SM70+ (guarded by __CUDA_ARCH__).
// NOTE(review): relies on INPUT_TILE, MAX_WARP_NUM, WARP_SIZE and gelu()
// defined elsewhere in this file; assumes hidden_half fits the 9000-element
// shared buffer and at most 8 __half2 values per thread in inp_reg.
__global__ void input_tiled_gemm_kernel_gelu(__half* output,
                                             __half* residual_add,
                                             const __half* vals,
                                             const __half* residual,
                                             const __half* input_bias,
                                             const __half* weight,
                                             const __half* bias,
                                             const __half* gamma,
                                             const __half* beta,
                                             const float epsilon,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size)
{
#if __CUDA_ARCH__ >= 700
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    __half2* output_cast = reinterpret_cast<__half2*>(output);
    const __half2* vals_cast = reinterpret_cast<const __half2*>(vals);
    const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);
    __half2* residual_add_cast = reinterpret_cast<__half2*>(residual_add);
    const __half2* weight_cast = reinterpret_cast<const __half2*>(weight);
    const __half2* input_bias_cast = reinterpret_cast<const __half2*>(input_bias);
    int hidden_half = hidden_dim >> 1;
    for (int j = 0; j < input_size; j += INPUT_TILE) {
        __shared__ __half2 input_shared[9000];
        for (int t = 0; t < INPUT_TILE; t++) {
            if ((t + j) < input_size) {
                __half2 inp_reg[8];
                int k = 0;
                int input_id = id;
                // Strided load of row (j + t): add residual and input bias,
                // publish to residual_add and stage in shared memory.
                while (input_id < hidden_half) {
                    inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
                    float2 inp_f = __half22float2(inp_reg[k]);
                    float2 residual_f =
                        __half22float2(residual_cast[(j + t) * hidden_half + input_id]);
                    float2 bias_f = __half22float2(input_bias_cast[input_id]);
                    inp_f.x += residual_f.x + bias_f.x;
                    inp_f.y += residual_f.y + bias_f.y;
                    inp_reg[k] = __float22half2_rn(inp_f);
                    residual_add_cast[(j + t) * hidden_half + input_id] = inp_reg[k];
                    input_shared[input_id + t * hidden_half] = inp_reg[k++];
                    input_id += blockDim.x;
                }
                // b.sync();
                // Block-wide mean: intra-warp shuffle reduce, then cross-warp
                // combine through shr[], then broadcast from lane 0.
                float sum = 0;
                for (int f = k - 1; f >= 0; f--) {
                    float2 inp_f = __half22float2(inp_reg[f]);
                    sum += inp_f.x + inp_f.y;
                }
                for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
                __shared__ float shr[MAX_WARP_NUM];
                if (g.thread_rank() == 0) shr[gid] = sum;
                b.sync();
                if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
                for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
                sum = g.shfl(sum, 0);
                float mean = sum / hidden_dim;
                // Variance of the mean-centered values, reduced the same way.
                sum = 0.f;
                for (int f = 0; f < k; f++) {
                    float2 inp_f = __half22float2(inp_reg[f]);
                    inp_f.x -= mean;
                    inp_f.y -= mean;
                    inp_reg[f] = __float22half2_rn(inp_f);
                    sum += inp_f.x * inp_f.x;
                    sum += inp_f.y * inp_f.y;
                }
                for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
                if (g.thread_rank() == 0) shr[gid] = sum;
                b.sync();
                if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
                for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
                sum = g.shfl(sum, 0);
                sum /= hidden_dim;
                sum += epsilon;
                // rsqrt is computed once in fp32 and broadcast as __half2.
                sum = __frsqrt_rn(sum);
                __half2 variance_h = __float2half2_rn(sum);
                const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
                const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
                // Normalize and apply the affine transform; refresh the
                // shared-memory copy for the GEMM stage.
                for (int f = 0; f < k; f++) {
                    int tid = f * blockDim.x + threadIdx.x;
                    inp_reg[f] = inp_reg[f] * variance_h;
                    inp_reg[f] = inp_reg[f] * gamma_cast[tid] + beta_cast[tid];
                    input_shared[tid + t * hidden_half] = inp_reg[f];
                    // output_cast[(j + t) * hidden_half + tid] = inp_reg[f];
                }
                b.sync();
            }
        }
        // GEMM stage: each warp sweeps 4 consecutive hidden rows at a time.
        int wid = gid << 2;
        int offset = wid * output_size;
        float2 sum[INPUT_TILE];
        for (int t = 0; t < INPUT_TILE; t++) {
            sum[t].x = 0;
            sum[t].y = 0;
        }
        while (wid < hidden_dim) {
            // Broadcast the 4 staged input scalars into both halves of a
            // __half2 so one value multiplies both output lanes of a column.
            __half2 vals_f[INPUT_TILE * 4];
            for (int t = 0; t < INPUT_TILE; t++) {
                if ((t + j) < input_size) {
                    __half2 val_h[2];
                    val_h[0] = input_shared[t * hidden_half + (wid >> 1)];
                    val_h[1] = input_shared[t * hidden_half + (wid >> 1) + 1];
                    __half* inp_data[2];
                    inp_data[0] = reinterpret_cast<__half*>(&val_h[0]);
                    inp_data[1] = reinterpret_cast<__half*>(&val_h[1]);
                    vals_f[(t << 2)] = __halves2half2(inp_data[0][0], inp_data[0][0]);
                    vals_f[(t << 2) + 1] = __halves2half2(inp_data[0][1], inp_data[0][1]);
                    vals_f[(t << 2) + 2] = __halves2half2(inp_data[1][0], inp_data[1][0]);
                    vals_f[(t << 2) + 3] = __halves2half2(inp_data[1][1], inp_data[1][1]);
                }
            }
            // Lanes stride across output columns; accumulate in fp32.
            int row = blockIdx.x * WARP_SIZE + lane;
            auto offset1 = offset + row;
            while (row < output_size) {
                __half2 weight_h[4];
                weight_h[0] = weight_cast[offset1];
                weight_h[1] = weight_cast[output_size + offset1];
                weight_h[2] = weight_cast[(output_size << 1) + offset1];
                weight_h[3] = weight_cast[((output_size << 1) + output_size) + offset1];
#pragma unroll
                for (int t = 0; t < INPUT_TILE; t++) {
                    if ((t + j) < input_size) {
                        float2 mul[4];
                        mul[0] = __half22float2(vals_f[(t << 2)] * weight_h[0]);
                        mul[1] = __half22float2(vals_f[(t << 2) + 1] * weight_h[1]);
                        mul[2] = __half22float2(vals_f[(t << 2) + 2] * weight_h[2]);
                        mul[3] = __half22float2(vals_f[(t << 2) + 3] * weight_h[3]);
                        sum[t].x += mul[0].x + mul[1].x + mul[2].x + mul[3].x;
                        sum[t].y += mul[0].y + mul[1].y + mul[2].y + mul[3].y;
                    }
                }
                row += (gridDim.x * WARP_SIZE);
                offset1 += (gridDim.x * WARP_SIZE);
            }
            wid += warp_num * 4;
            offset += (output_size * warp_num * 4);
        }
        // Epilogue: transpose partials through shared memory, reduce across
        // warps with xor-shuffles, then warp 0 adds bias, applies gelu and
        // writes the final output column.
        for (int t = 0; t < INPUT_TILE; t++) {
            if ((t + j) < input_size) {
                float2 sum_g = sum[t];
                __shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
                const __half2* bias_cast = reinterpret_cast<const __half2*>(bias);
                {
                    partial_result[gid][lane] = sum_g;
                    __syncthreads();
                    sum_g = partial_result[lane][gid];
                    //__syncthreads();
                }
#pragma unroll
                for (int i = 1; i < WARP_SIZE; i *= 2) {
                    sum_g.x += g.shfl_xor(sum_g.x, i);
                    sum_g.y += g.shfl_xor(sum_g.y, i);
                }
                if (lane == 0) { partial_result[0][gid] = sum_g; }
                __syncthreads();
                if (gid == 0) {
                    int col = blockIdx.x * WARP_SIZE + lane;
                    if (col < output_size) {
                        sum_g = partial_result[0][lane];
                        float2 bias_f = __half22float2(bias_cast[col]);
                        sum_g.x = bias_f.x + sum_g.x;
                        sum_g.y = bias_f.y + sum_g.y;
                        sum_g.x = gelu(sum_g.x);
                        sum_g.y = gelu(sum_g.y);
                        output_cast[(j + t) * output_size + col] = __float22half2_rn(sum_g);
                    }
                }
            }
        }
    }
#endif
}
// Fused kernel, fp32 variant: (vals + residual + input_bias) ->
// layernorm(gamma, beta, epsilon) -> GEMM against float weights -> bias add
// -> gelu(), written to `output`; the pre-layernorm sum also goes to
// `residual_add`. All pointers are addressed as float2, so hidden_dim is
// assumed even and output_size arrives pre-halved from the launcher — TODO
// confirm against callers.
// NOTE(review): relies on INPUT_TILE, MAX_WARP_NUM, WARP_SIZE and gelu()
// defined elsewhere in this file; assumes hidden_half fits the 5000-element
// shared buffer and at most 8 float2 values per thread in inp_reg.
__global__ void input_tiled_gemm_kernel_gelu(float* output,
                                             float* residual_add,
                                             const float* vals,
                                             const float* residual,
                                             const float* input_bias,
                                             const float* weight,
                                             const float* bias,
                                             const float* gamma,
                                             const float* beta,
                                             const float epsilon,
                                             int hidden_dim,
                                             int input_size,
                                             int output_size)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    float2* output_cast = reinterpret_cast<float2*>(output);
    const float2* vals_cast = reinterpret_cast<const float2*>(vals);
    const float2* residual_cast = reinterpret_cast<const float2*>(residual);
    float2* residual_add_cast = reinterpret_cast<float2*>(residual_add);
    const float2* weight_cast = reinterpret_cast<const float2*>(weight);
    const float2* input_bias_cast = reinterpret_cast<const float2*>(input_bias);
    int hidden_half = hidden_dim >> 1;
    for (int j = 0; j < input_size; j += INPUT_TILE) {
        __shared__ float2 input_shared[5000];
        for (int t = 0; t < INPUT_TILE; t++) {
            if ((t + j) < input_size) {
                float2 inp_reg[8];
                int k = 0;
                int input_id = id;
                // Strided load of row (j + t): add residual and input bias,
                // publish to residual_add and stage in shared memory.
                while (input_id < hidden_half) {
                    inp_reg[k] = vals_cast[(j + t) * hidden_half + input_id];
                    float2 residual_f = residual_cast[(j + t) * hidden_half + input_id];
                    float2 bias_f = input_bias_cast[input_id];
                    inp_reg[k].x += residual_f.x + bias_f.x;
                    inp_reg[k].y += residual_f.y + bias_f.y;
                    residual_add_cast[(j + t) * hidden_half + input_id] = inp_reg[k];
                    input_shared[input_id + t * hidden_half] = inp_reg[k++];
                    input_id += blockDim.x;
                }
                b.sync();
                // Block-wide mean: intra-warp shuffle reduce, then cross-warp
                // combine through shr[], then broadcast from lane 0.
                float sum = 0;
                for (int f = k - 1; f >= 0; f--) { sum += inp_reg[f].x + inp_reg[f].y; }
                for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
                __shared__ float shr[MAX_WARP_NUM];
                if (g.thread_rank() == 0) shr[gid] = sum;
                b.sync();
                if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
                for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
                sum = g.shfl(sum, 0);
                float mean = sum / hidden_dim;
                // Variance of the mean-centered values, reduced the same way.
                sum = 0.f;
                for (int f = 0; f < k; f++) {
                    inp_reg[f].x -= mean;
                    inp_reg[f].y -= mean;
                    sum += inp_reg[f].x * inp_reg[f].x;
                    sum += inp_reg[f].y * inp_reg[f].y;
                }
                for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
                if (g.thread_rank() == 0) shr[gid] = sum;
                b.sync();
                if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()];
                for (int i = 1; i < (warp_num); i *= 2) { sum += g.shfl_down(sum, i); }
                sum = g.shfl(sum, 0);
                sum /= hidden_dim;
                sum += epsilon;
                sum = __frsqrt_rn(sum);
                const float2* gamma_cast = reinterpret_cast<const float2*>(gamma);
                const float2* beta_cast = reinterpret_cast<const float2*>(beta);
                // Normalize and apply the affine transform; refresh the
                // shared-memory copy for the GEMM stage.
                for (int f = 0; f < k; f++) {
                    int id = f * blockDim.x + threadIdx.x;
                    inp_reg[f].x = inp_reg[f].x * sum;
                    inp_reg[f].y = inp_reg[f].y * sum;
                    inp_reg[f].x = inp_reg[f].x * gamma_cast[id].x + beta_cast[id].x;
                    inp_reg[f].y = inp_reg[f].y * gamma_cast[id].y + beta_cast[id].y;
                    input_shared[id + t * hidden_half] = inp_reg[f];
                }
                b.sync();
            }
        }
        // GEMM stage: each warp sweeps 2 consecutive hidden rows at a time;
        // lanes stride across output columns.
        int wid = gid << 1;
        int offset = wid * output_size;
        float2 sum[INPUT_TILE];
        for (int t = 0; t < INPUT_TILE; t++) {
            sum[t].x = 0;
            sum[t].y = 0;
        }
        while (wid < hidden_dim) {
            float2 val_data[INPUT_TILE];
            for (int t = 0; t < INPUT_TILE; t++) {
                if ((t + j) < input_size) {
                    val_data[t] = input_shared[t * hidden_half + (wid >> 1)];
                }
            }
            int row = blockIdx.x * WARP_SIZE + lane;
            auto offset1 = offset + row;
            while (row < output_size) {
                float2 weight[2];
                weight[0] = weight_cast[offset1];
                weight[1] = weight_cast[output_size + offset1];
                for (int t = 0; t < INPUT_TILE; t++) {
                    if ((t + j) < input_size) {
                        float2 mul[2];
                        mul[0].x = val_data[t].x * weight[0].x;
                        mul[0].y = val_data[t].x * weight[0].y;
                        mul[1].x = val_data[t].y * weight[1].x;
                        mul[1].y = val_data[t].y * weight[1].y;
                        sum[t].x += mul[0].x + mul[1].x;
                        sum[t].y += mul[0].y + mul[1].y;
                    }
                }
                row += (gridDim.x * WARP_SIZE);
                offset1 += (gridDim.x * WARP_SIZE);
            }
            wid += warp_num * 2;
            offset += (output_size * warp_num * 2);
        }
        // Epilogue: transpose partials through shared memory, reduce across
        // warps with xor-shuffles, then warp 0 adds bias, applies gelu and
        // writes the final output column.
        for (int t = 0; t < INPUT_TILE; t++) {
            if ((t + j) < input_size) {
                float2 sum_g = sum[t];
                __shared__ float2 partial_result[MAX_WARP_NUM][WARP_SIZE + 1];
                const float2* bias_cast = reinterpret_cast<const float2*>(bias);
                {
                    partial_result[gid][lane] = sum_g;
                    __syncthreads();
                    sum_g = partial_result[lane][gid];
                    __syncthreads();
                }
#pragma unroll
                for (int i = 1; i < WARP_SIZE; i *= 2) {
                    sum_g.x += g.shfl_xor(sum_g.x, i);
                    sum_g.y += g.shfl_xor(sum_g.y, i);
                }
                if (lane == 0) { partial_result[0][gid] = sum_g; }
                __syncthreads();
                if (gid == 0) {
                    int col = blockIdx.x * WARP_SIZE + lane;
                    if (col < output_size) {
                        sum_g = partial_result[0][lane];
                        float2 bias_f = bias_cast[col];
                        sum_g.x = bias_f.x + sum_g.x;
                        sum_g.y = bias_f.y + sum_g.y;
                        sum_g.x = gelu(sum_g.x);
                        sum_g.y = gelu(sum_g.y);
                        output_cast[(j + t) * output_size + col] = sum_g;
                    }
                }
            }
        }
    }
}
// Host-side launcher for the fused residual-add + layernorm + GEMM + GELU
// kernel (full-precision weights). output_size is halved because the kernel
// addresses 2-wide vector columns; the grid covers them WARP_SIZE at a time.
template <typename T>
void launch_input_tiled_gemm_kernel_gelu(T* output,
                                         T* residual_add,
                                         const T* vals,
                                         const T* residual,
                                         const T* input_bias,
                                         const T* weight,
                                         const T* bias,
                                         const T* gamma,
                                         const T* beta,
                                         const float epsilon,
                                         int hidden_dim,
                                         int input_size,
                                         int output_size,
                                         cudaStream_t stream)
{
    constexpr int block_threads = 1024;
    const int vec_output = output_size / 2;  // kernel indexes 2-wide columns
    dim3 grid((vec_output - 1) / WARP_SIZE + 1);
    dim3 block(block_threads);
    input_tiled_gemm_kernel_gelu<<<grid, block, 0, stream>>>(output,
                                                             residual_add,
                                                             vals,
                                                             residual,
                                                             input_bias,
                                                             weight,
                                                             bias,
                                                             gamma,
                                                             beta,
                                                             epsilon,
                                                             hidden_dim,
                                                             input_size,
                                                             vec_output);
}
// Explicit instantiations of the GELU launcher (fp32 and fp16) so callers in
// other translation units can link against this .cu definition.
template void launch_input_tiled_gemm_kernel_gelu(float* output,
                                                  float* residual_add,
                                                  const float* vals,
                                                  const float* residual,
                                                  const float* input_bias,
                                                  const float* weight,
                                                  const float* bias,
                                                  const float* gamma,
                                                  const float* beta,
                                                  const float epsilon,
                                                  int hidden_dim,
                                                  int input_size,
                                                  int output_size,
                                                  cudaStream_t stream);
template void launch_input_tiled_gemm_kernel_gelu(__half* output,
                                                  __half* residual_add,
                                                  const __half* vals,
                                                  const __half* residual,
                                                  const __half* input_bias,
                                                  const __half* weight,
                                                  const __half* bias,
                                                  const __half* gamma,
                                                  const __half* beta,
                                                  const float epsilon,
                                                  int hidden_dim,
                                                  int input_size,
                                                  int output_size,
                                                  cudaStream_t stream);
// Host-side launcher for the fused residual-add + layernorm + int8-weight
// GEMM + GELU kernel. `scale`/`groups` describe the per-group dequantization
// factors. output_size is halved because the kernel works on 2-wide vector
// columns.
template <typename T>
void launch_input_tiled_gemm_kernel_gelu(T* output,
                                         T* residual_add,
                                         const T* vals,
                                         const T* residual,
                                         const T* input_bias,
                                         const int8_t* weight,
                                         const T* bias,
                                         const T* gamma,
                                         const T* beta,
                                         const float epsilon,
                                         int hidden_dim,
                                         int input_size,
                                         int output_size,
                                         float* scale,
                                         int groups,
                                         cudaStream_t stream)
{
    constexpr int block_threads = 1024;
    const int vec_output = output_size / 2;  // kernel indexes 2-wide columns
    dim3 grid((vec_output - 1) / WARP_SIZE + 1);
    dim3 block(block_threads);
    input_tiled_gemm_kernel_gelu<<<grid, block, 0, stream>>>(output,
                                                             residual_add,
                                                             vals,
                                                             residual,
                                                             input_bias,
                                                             weight,
                                                             bias,
                                                             gamma,
                                                             beta,
                                                             epsilon,
                                                             hidden_dim,
                                                             input_size,
                                                             vec_output,
                                                             scale,
                                                             groups);
}
// Explicit instantiation of the int8-weight GELU launcher for fp16
// activations (the only precision the quantized kernel path supports here).
template void launch_input_tiled_gemm_kernel_gelu(__half* output,
                                                  __half* residual_add,
                                                  const __half* vals,
                                                  const __half* residual,
                                                  const __half* input_bias,
                                                  const int8_t* weight,
                                                  const __half* bias,
                                                  const __half* gamma,
                                                  const __half* beta,
                                                  const float epsilon,
                                                  int hidden_dim,
                                                  int input_size,
                                                  int output_size,
                                                  float* scale,
                                                  int groups,
                                                  cudaStream_t stream);
|
a4f160b029e0584691a73881c9f1d25d899e3ec8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <ctime>
#include <fstream>
//funcion para generar una jewel aleatoria, como la generacion inicial.
// Returns one random jewel id for the given difficulty:
//   difficulty 1 -> 1..4, 2 -> 1..6, 3 -> 1..8; any other value -> -1.
// Bug fix: the original reseeded rand() with srand(time(NULL)) on EVERY call,
// so all calls within the same second produced the identical jewel. The RNG
// is now expected to be seeded once by the caller / process startup.
int generarJewel(int dificultad) {
    if (dificultad < 1 || dificultad > 3) return -1;
    // Difficulty 1/2/3 maps to 4/6/8 distinct jewel kinds.
    int numJewels = 2 + 2 * dificultad;
    return rand() % numJewels + 1;
}
// Fills the whole board (altura * anchura cells, stored row-major, bottom row
// first) with random jewels for the given difficulty:
//   difficulty 1 -> values 1..4, 2 -> 1..6, 3 -> 1..8.
// Cells are left untouched when the difficulty is outside 1..3, matching the
// original switch (which wrote nothing for unknown difficulties).
// Improvement: the jewel-kind count is loop-invariant, so the per-cell switch
// was hoisted out of the loop.
void generacionInicialRandomJewels(float *tablero, int dificultad, int anchura, int altura) {
    srand(time(NULL));  // seed once per board generation
    if (dificultad < 1 || dificultad > 3) return;
    const int numJewels = 2 + 2 * dificultad;  // 4 / 6 / 8 jewel kinds
    for (int i = 0; i < altura * anchura; i++) {
        tablero[i] = rand() % numJewels + 1;
    }
}
// Prints the board to stdout. Rows are stored bottom-up, so iteration starts
// at the top row (altura - 1) to render the board the right way round; each
// cell value is truncated to int for display.
void printTablero(float* tablero, int anchura, int altura) {
    for (int fila = altura - 1; fila >= 0; fila--) {
        printf("\n");
        for (int col = 0; col < anchura; col++) {
            printf("%d ", (int)tablero[col + fila * anchura]);
        }
    }
    printf("\n");
}
// Removes the jewels listed in jewels_eliminadas from the board and refills
// the freed cells with fresh random jewels for the current difficulty.
// jewels_eliminadas holds up to max(anchura, altura) (x, y) pairs, terminated
// by a value < 0 in the first unwritten slot.
// The first two x entries decide the run orientation: different x values ->
// horizontal run; identical -> vertical run.
// NOTE(review): the scan below reads up to 2 * max slots, so callers must
// initialize the FULL 2 * max buffer to -1 — verify at the call sites.
void eliminarJewels(float* tablero, float* jewels_eliminadas, int dificultad, int anchura, int altura) {
    int max = 0;
    if (altura >= anchura) max = altura;
    else max = anchura;
    int final = 0;
    bool modif = false;
    // Find the last written slot, since some positions may be unwritten
    // (unwritten slots hold the -1 sentinel).
    for (int i = 0; i < max * 2; i++) {
        if (jewels_eliminadas[i] < 0) {
            final = i;
            modif = true;
            break;
        }
    }
    // If the buffer is completely written, there is no sentinel.
    if (!modif) final = max * 2;
    //printf("\nFinal: %i\n", final);
    srand(time(NULL));
    if (jewels_eliminadas[0] != jewels_eliminadas[2]) {
        // Horizontal run: shift every cell above the run down one row and
        // refill the vacated top cells with new random jewels.
        for (int y = jewels_eliminadas[1]; y < altura; y++) {
            //printf("A");
            for (int x = jewels_eliminadas[0]; x <= jewels_eliminadas[final - 2]; x++) {
                //printf("\nBUCLE X:%i Y:%i\n", x, y);
                if (y + 1 < altura) {
                    // Pull the cell from the row above, then refill the source.
                    tablero[x + (y)*(anchura)] = tablero[x + (y + 1)*anchura];
                    switch (dificultad) {
                    case 1: {
                        int randJewel = rand() % 4 + 1;
                        tablero[x + (y+1)*anchura] = randJewel;
                        break;
                    }
                    case 2: {
                        int randJewel = rand() % 6 + 1;
                        tablero[x + (y+1)*anchura] = randJewel;
                        break;
                    }
                    case 3: {
                        int randJewel = rand() % 8 + 1;
                        tablero[x + (y+1)*anchura] = randJewel;
                        break;
                    }
                    }
                }
                else {
                    // Top row: nothing above, just refill with a random jewel.
                    switch (dificultad) {
                    case 1: {
                        int randJewel = rand() % 4 + 1;
                        tablero[x + y*anchura] = randJewel;
                        break;
                    }
                    case 2: {
                        int randJewel = rand() % 6 + 1;
                        tablero[x + y*anchura] = randJewel;
                        break;
                    }
                    case 3: {
                        int randJewel = rand() % 8 + 1;
                        tablero[x + y*anchura] = randJewel;
                        break;
                    }
                    }
                }
            }
        }
    }else{
        // Vertical run: drop the cells above the removed run by final/2 rows
        // and refill the rest of the column with new random jewels.
        int posicion = jewels_eliminadas[0] + jewels_eliminadas[1] * anchura;
        // NOTE(review): `valor` is never read after this — possibly leftover.
        float valor = tablero[posicion];
        for (int y = jewels_eliminadas[1]; y < altura; y++) {
            //printf("A");
            for (int x = jewels_eliminadas[0]; x <= jewels_eliminadas[final - 2]; x++) {
                //printf("\nBUCLE X:%i Y:%i\n", x, y);
                if (y < altura) {
                    if (y >= jewels_eliminadas[final-2]) {
                        // Above the run: shift this cell down by the run
                        // length (final/2 pairs), then refill the source.
                        tablero[x + (y-final/2)*(anchura)] = tablero[x + (y)*anchura];
                        switch (dificultad) {
                        case 1: {
                            int randJewel = rand() % 4 + 1;
                            tablero[x + (y)*anchura] = randJewel;
                            break;
                        }
                        case 2: {
                            int randJewel = rand() % 6 + 1;
                            tablero[x + (y)*anchura] = randJewel;
                            break;
                        }
                        case 3: {
                            int randJewel = rand() % 8 + 1;
                            tablero[x + (y)*anchura] = randJewel;
                            break;
                        }
                        }
                    }
                    else {
                        // Inside the run: simply refill with a random jewel.
                        switch (dificultad) {
                        case 1: {
                            int randJewel = rand() % 4 + 1;
                            tablero[x + (y)*anchura] = randJewel;
                            break;
                        }
                        case 2: {
                            int randJewel = rand() % 6 + 1;
                            tablero[x + (y)*anchura] = randJewel;
                            break;
                        }
                        case 3: {
                            int randJewel = rand() % 8 + 1;
                            tablero[x + (y)*anchura] = randJewel;
                            break;
                        }
                        }
                    }
                }
            }
        }
    }
}
// Analyzes the board around the freshly-moved cell (x, y): first looks for a
// horizontal run of >= 3 identical jewels through (x, y), then — only if none
// is found — a vertical run. Any run found is recorded as (x, y) pairs in a
// -1-terminated buffer and handed to eliminarJewels for removal/refill.
// Bug fixes vs. the original:
//  * the (x, y)-pair buffer holds 2 * max floats but only `max` entries were
//    initialized to -1, so eliminarJewels could scan uninitialized memory;
//  * the "above" neighbor guard checked index x + 1 + y*anchura but then read
//    x + (y + 1)*anchura; the guard now matches the read;
//  * boundary guards used `<= size` (one past the board); now `< size`;
//  * a stray debug printf("\nABAJO\n") was removed from runtime output.
void analisisTableroManual(int dificultad, float* tablero, int anchura, int altura, int x, int y) {
    int max = 0;
    int size = anchura * altura;
    if (altura >= anchura) max = altura;
    else max = anchura;
    // Up to `max` eliminated jewels, stored as (x, y) pairs -> 2 * max floats.
    float* jewels_eliminadas = (float*)malloc(2 * max * sizeof(float));
    for (int i = 0; i < 2 * max; i++) {
        jewels_eliminadas[i] = -1;
    }
    int jewels_posibles_izq = 0;
    int jewels_posibles_der = 0;
    // --- horizontal scan ---
    // Count matching jewels to the left of (x, y).
    if ((x - 1 + y * anchura >= 0) && tablero[x - 1 + y * anchura] == tablero[x + y * anchura]) {
        int i = 1;
        while ((x - i + y * anchura >= 0) && (x - i >= 0) &&
               tablero[x - i + y * anchura] == tablero[x + y * anchura]) {
            jewels_posibles_izq++;
            i++;
        }
    }
    // Count matching jewels to the right of (x, y).
    if ((x + 1 + y * anchura < size) && tablero[x + 1 + y * anchura] == tablero[x + y * anchura]) {
        int i = 1;
        while ((x + i + y * anchura < size) && (x + i < anchura) &&
               tablero[x + i + y * anchura] == tablero[x + y * anchura]) {
            jewels_posibles_der++;
            i++;
        }
    }
    // Three or more in a row -> record the horizontal run.
    if (1 + jewels_posibles_izq + jewels_posibles_der >= 3) {
        int salto = 0;
        for (int j = jewels_posibles_izq; j >= (1); j--) {
            jewels_eliminadas[salto] = x - j;
            jewels_eliminadas[salto + 1] = y;
            salto += 2;
        }
        jewels_eliminadas[jewels_posibles_izq * 2] = x;
        jewels_eliminadas[jewels_posibles_izq * 2 + 1] = y;
        salto = 2;
        for (int k = 1; k <= jewels_posibles_der; k++) {
            jewels_eliminadas[salto + jewels_posibles_izq * 2] = x + k;
            jewels_eliminadas[salto + jewels_posibles_izq * 2 + 1] = y;
            salto += 2;
        }
    }
    else {  // --- vertical scan ---
        int jewels_posibles_arrib = 0;
        int jewels_posibles_abaj = 0;
        // Count matching jewels below (x, y).
        if ((x + (y - 1) * anchura >= 0) && tablero[x + (y - 1) * anchura] == tablero[x + y * anchura]) {
            int i = 1;
            while ((x + (y - i) * anchura >= 0) &&
                   tablero[x + (y - i) * anchura] == tablero[x + y * anchura]) {
                jewels_posibles_abaj++;
                i++;
            }
        }
        // Count matching jewels above (x, y); the guard checks the exact
        // index that is read.
        if ((x + (y + 1) * anchura < size) && tablero[x + (y + 1) * anchura] == tablero[x + y * anchura]) {
            int i = 1;
            while ((x + (y + i) * anchura < size) &&
                   tablero[x + (y + i) * anchura] == tablero[x + y * anchura]) {
                jewels_posibles_arrib++;
                i++;
            }
        }
        // Three or more in a column -> record the vertical run.
        if (1 + jewels_posibles_abaj + jewels_posibles_arrib >= 3) {
            int salto = 0;
            for (int j = jewels_posibles_abaj; j >= (1); j--) {
                jewels_eliminadas[salto] = x;
                jewels_eliminadas[salto + 1] = y - j;
                salto += 2;
            }
            jewels_eliminadas[jewels_posibles_abaj * 2] = x;
            jewels_eliminadas[jewels_posibles_abaj * 2 + 1] = y;
            salto = 2;
            for (int k = 1; k <= jewels_posibles_arrib; k++) {
                jewels_eliminadas[salto + jewels_posibles_abaj * 2] = x;
                jewels_eliminadas[salto + jewels_posibles_abaj * 2 + 1] = y + k;
                salto += 2;
            }
        }
    }
    eliminarJewels(tablero, jewels_eliminadas, dificultad, anchura, altura);
    free(jewels_eliminadas);
}
// Swaps the jewel at (jewel1_x, jewel1_y) with its neighbor in the given
// direction (1 = up, 2 = down, 3 = left, 4 = right). When seleccion == 2
// (manual play) the board is re-analyzed around the destination cell so any
// resulting run is eliminated.
// Bug fix: the swap temporary was declared int, silently truncating the
// float cell value; it is now a float so the swap is lossless.
// NOTE(review): no bounds check is performed here — callers are expected not
// to swap across the board edge; confirm at the call sites.
void intercambiarPosiciones(float* tablero, int jewel1_x, int jewel1_y, int direccion, int anchura, int altura, int seleccion, int dificultad) {
    int jewel2_x = jewel1_x;
    int jewel2_y = jewel1_y;
    switch (direccion)
    {
    case 1: //Up
    {
        jewel2_y += 1;
        break;
    }
    case 2: //Down
    {
        jewel2_y -= 1;
        break;
    }
    case 3: //Left
    {
        jewel2_x -= 1;
        break;
    }
    case 4: //Right
    {
        jewel2_x += 1;
        break;
    }
    }
    float aux1;
    aux1 = tablero[jewel2_x + jewel2_y*anchura];
    tablero[jewel2_x + jewel2_y*anchura] = tablero[jewel1_x + jewel1_y*anchura];
    tablero[jewel1_x + jewel1_y*anchura] = aux1;
    if (seleccion == 2)
        analisisTableroManual(dificultad, tablero, anchura, altura, jewel2_x, jewel2_y);
}
//Funcion CPU. TODO: Arreglar calculo de contiguos, posible fallo al contar
// Automatic (CPU) move search + elimination. For every cell it counts how
// many identical jewels sit to its right starting two columns away (the cell
// itself will be swapped one step right before elimination), picks the
// best-scoring cell, performs the swap, and removes the run when it is at
// least 3 long.
// Bug fixes vs. the original (which carried a TODO about miscounting):
//  * the (x, y)-pair buffer holds 2 * max floats but only `max` entries were
//    initialized to -1, so eliminarJewels could scan uninitialized memory;
//  * the right-run counting loop lacked a row guard and could walk across
//    row boundaries; it now stops at (x + i) < anchura;
//  * boundary guards used `<= size` (one past the board); now `< size`.
// NOTE(review): the swap still happens even when valor_mejor < 3 (no
// elimination follows) — preserved from the original; confirm intended.
void analisisTableroAutomatico(int dificultad, float* tablero, int anchura, int altura) {
    int max = 0;
    int size = anchura * altura;
    int jewels_posibles_der = 0;
    if (altura >= anchura) max = altura;
    else max = anchura;
    // Up to `max` eliminated jewels, stored as (x, y) pairs -> 2 * max floats.
    float* jewels_eliminadas = (float*)malloc(2 * max * sizeof(float));
    // Auxiliary score board: achievable run length from each cell.
    float* aux_tablero = (float*)malloc(size * sizeof(float));
    for (int i = 0; i < 2 * max; i++) {
        jewels_eliminadas[i] = -1;
    }
    for (int y = 0; y < altura; y++) {
        for (int x = 0; x < anchura; x++) {
            jewels_posibles_der = 0;
            // Count identical jewels to the right, starting at x + 2 (the
            // jewel at x will be swapped one step right before elimination).
            if ((x + 2) < anchura) {
                if ((x + 2 + y * anchura < size) && tablero[x + 2 + y * anchura] == tablero[x + y * anchura]) {
                    int i = 2;
                    while ((x + i + y * anchura < size) && (x + i) < anchura &&
                           tablero[x + i + y * anchura] == tablero[x + y * anchura]) {
                        jewels_posibles_der++;
                        i++;
                    }
                    aux_tablero[x + y * anchura] = jewels_posibles_der + 1;
                }
                else {
                    aux_tablero[x + y * anchura] = 1;
                }
            }
            else {
                aux_tablero[x + y * anchura] = 1;
            }
        }
    }
    // Pick the best-scoring cell.
    int x_mejor = 0;
    int y_mejor = 0;
    int valor_mejor = 0;
    for (int y = 0; y < altura; y++) {
        for (int x = 0; x < anchura; x++) {
            if (aux_tablero[x + y * anchura] > valor_mejor) {
                x_mejor = x;
                y_mejor = y;
                valor_mejor = aux_tablero[x + y * anchura];
            }
        }
    }
    intercambiarPosiciones(tablero, x_mejor, y_mejor, 4, anchura, altura, 1, dificultad);
    // Record the run to eliminate when it is long enough.
    if (valor_mejor >= 3) {
        jewels_eliminadas[0] = x_mejor;
        jewels_eliminadas[1] = y_mejor;
        int salto = 2;
        for (int j = 1; j <= (valor_mejor); j++) {
            jewels_eliminadas[salto] = x_mejor + j;
            jewels_eliminadas[salto + 1] = y_mejor;
            salto += 2;
        }
    }
    eliminarJewels(tablero, jewels_eliminadas, dificultad, anchura, altura);
    free(jewels_eliminadas);
    free(aux_tablero);
}
bool precargar(int& anchura, int& altura, int& dificultad, char* fichero)
{
std::ifstream fAnchura("anchura.txt");
if (!fAnchura.is_open())
{
std::cout << "ERROR: no existe un archivo guardado." << std::endl;
return false;
}
fAnchura >> anchura;
fAnchura.close();
std::ifstream fAltura("altura.txt");
if (!fAltura.is_open())
{
std::cout << "ERROR: no existe un archivo guardado." << std::endl;
return false;
}
fAltura >> altura;
fAltura.close();
std::ifstream fDificultad("dificultad.txt");
if (!fDificultad.is_open())
{
std::cout << "ERROR: no existe un archivo guardado." << std::endl;
return false;
}
fDificultad >> dificultad;
fDificultad.close();
std::ifstream fCarga(fichero);
if (!fCarga.is_open())
{
std::cout << "ERROR: no existe un archivo guardado." << std::endl;
return false;
}
fCarga.close();
return true;
}
void cargar(int anchura, int altura, float* tablero, char* fichero)
{
int aux;
char* array = (char*)malloc(anchura*altura + 1);
std::ifstream fCarga(fichero);
fCarga.getline(array, anchura*altura + 1);
for (int i = 0; i < anchura*altura; i++)
{
aux = (array[i] - 48);
tablero[i] = (float)aux;
}
free(array);
fCarga.close();
}
void guardado(float* tablero, int anchura, int altura, int dificultad, char* fichero)
{
//Sistema de guardado
std::ofstream ficheroAnchura;
ficheroAnchura.open("Anchura.txt");
ficheroAnchura.clear();
ficheroAnchura << anchura;
ficheroAnchura.close();
std::ofstream ficheroAltura;
ficheroAltura.open("Altura.txt");
ficheroAltura.clear();
ficheroAltura << altura;
ficheroAltura.close();
std::ofstream ficheroDificultad;
ficheroDificultad.open("Dificultad.txt");
ficheroDificultad.clear();
ficheroDificultad << dificultad;
ficheroDificultad.close();
std::ofstream ficheroGuardado;
ficheroGuardado.open(fichero);
ficheroGuardado.clear();
for (int index = 0; index < anchura*altura; index++)
{
ficheroGuardado << tablero[index];
}
ficheroGuardado.close();
}
void bombaFila(float* tablero, int anchura, int altura, int dificultad, int fila) {
for (int iFila = 0; (iFila + fila) < altura; iFila++)
{
for (int iColm = 0; iColm < anchura; iColm++)
{
if ((iFila + fila + 1) < altura)
{
tablero[(iFila + fila)*anchura + iColm] = tablero[(iFila + fila + 1)*altura + iColm];
}
else {
tablero[(iFila + fila)*anchura + iColm] = generarJewel(dificultad);
}
}
}
}
void bombaColumna(float* tablero, int anchura, int altura, int dificultad, int columna) {
for (int iFila = 0; iFila < altura; iFila++)
{
for (int iColm = 0; (columna - iColm) > 0; iColm++)
{
if ((columna - iColm - 1) < 0)
{
tablero[(iFila*anchura) + (columna - iColm)] = generarJewel(dificultad);
}
else {
tablero[(iFila*anchura) + (columna - iColm)] = tablero[(iFila*altura) + (columna - iColm - 1)];
}
}
}
}
void bombaRotarCPU(float* tablero, int anchura, int altura, int fila, int columna)
{
float aux[9];
int index = 0;
for (int iColm = columna - 1; iColm <= columna + 1; iColm++)
{
for (int iFila = fila + 1; iFila >= fila - 1; iFila--)
{
aux[index] = tablero[iFila*anchura + iColm];
index++;
}
}
index = 0;
for (int iFila = 0; iFila < 3; iFila++)
{
for (int iColumna = 0; iColumna < 3; iColumna++)
{
tablero[(iFila + fila - 1)*anchura + (columna - 1) + iColumna] = aux[index];
index++;
}
}
}
int main(int argc, char** argv) {
//Matriz de tamao variable de floats, un array de Altura*Anchura
int anchura;
int altura;
int dificultad;
char modo;
bool automatico = true;
int size;
char ficheroGuardado[9] = "save.txt";
bool encontrado = false;
int seleccion;
float *tablero;
/* Valores por argumento*/
if (argc == 1)
{
std::cout << "Anchura del tablero: ";
std::cin >> anchura;
std::cout << "Altura del tablero: ";
std::cin >> altura;
std::cout << "Elija dificultad: \n1.-Facil \n2.-Media \n3.-Dificil\n";
std::cin >> dificultad;
std::cout << "Automatico? 1.-SI 2.-NO\n";
std::cin >> seleccion;
}
else
{
modo = argv[1][1];
dificultad = atoi(argv[2]);
anchura = atoi(argv[3]);
altura = atoi(argv[4]);
switch (modo) {
case 'a': {seleccion = 1; break; }
case 'm': {seleccion = 2; break; }
default: printf("Valor no valido.\n"); return -1;
}
}
bool jugando = true;
/* Establecer automatico como modo de juego */
size = anchura*altura;
tablero = (float*)malloc(size * sizeof(float));
//Se inicializa la matriz
generacionInicialRandomJewels(tablero, dificultad, anchura, altura);
//Bucle principal del juego
while (jugando) {
printTablero(tablero, anchura, altura);
int jewel1_x = 0;
int jewel1_y = 0;
int accion = 0;
std::cout << "Accin a realizar:\n";
std::cout << "(1) Intercambiar Jewels\n";
std::cout << "(2) Guardar partida\n";
std::cout << "(3) Cargar partida\n";
std::cout << "(9) Usar una Bomba\n";
std::cout << "(0) Exit\n";
std::cout << "Elija accion: ";
std::cin >> accion;
switch (accion) {
case 0: {
free(tablero);
return 0;
break;
}
case 1: {
if (seleccion == 2)
{
std::cout << "Posicion de la primera jewel a intercambiar (empiezan en 0)\n";
std::cout << "Columna: ";
std::cin >> jewel1_x;
std::cout << "Fila: ";
std::cin >> jewel1_y;
if (!((jewel1_x < anchura) && (jewel1_x >= 0) && (jewel1_y < altura) && (jewel1_y >= 0))) {
printf("Posicion erronea.\n");
continue;
}
int direccion = 0;
std::cout << "Direccion a seguir para intercambio de posiciones: \n 1.-Arriba\n 2.-Abajo\n 3.-Izquierda\n 4.-Derecha\n";
std::cin >> direccion;
if (direccion > 4 && direccion > 1) {
printf("Direccion erronea.\n");
continue;
}
else {
switch (direccion)
{
case 1: //Arriba
{
if (jewel1_y == altura)
{
printf("No se puede realizar el intercambio especificado.\n");
continue;
}
break;
}
case 2: //Abajo
{
if (jewel1_y == 0)
{
printf("No se puede realizar el intercambio especificado.\n");
continue;
}
break;
}
case 3: //Izquierda
{
if (jewel1_x == 0)
{
printf("No se puede realizar el intercambio especificado.\n");
continue;
}
break;
}
case 4: //Derecha
{
if (jewel1_x == anchura - 1)
{
printf("No se puede realizar el intercambio especificado.\n");
continue;
}
break;
}
}
}
/* Intercambiar posiciones */
intercambiarPosiciones(tablero, jewel1_x, jewel1_y, direccion, anchura, altura, seleccion, dificultad);
}
else if (seleccion == 1)
{
/* Analisis automatico */
analisisTableroAutomatico(dificultad, tablero, anchura, altura);
}
break;
}
case 2: {
guardado(tablero, anchura, altura, dificultad, ficheroGuardado);
std::cout << "Guardado correcto.\n";
break;
}
case 3: {
/* Precarga de tablero */
int encontrado = precargar(anchura, altura, dificultad, ficheroGuardado);
size = anchura*altura;
if (encontrado)
{
free(tablero);
tablero = (float*)malloc(size * sizeof(float));
/* Cargar tablero */
cargar(anchura, altura, tablero, ficheroGuardado);
std::cout << "Automatico? 1.-SI 2.-NO\n";
std::cin >> seleccion;
std::cout << "Se ha cargado el Tablero: \n";
}
else {
std::cout << "No existe ninguna partida guardada.\n";
}
break;
}
case 9: {
// Bomba
int bomba = 0;
int fila = 0; int columna = 0;
std::cout << "Elija una bomba:";
/* Bombas por tipo de dificultad */
switch (dificultad) {
case 1: {
std::cout << "(1) Bomba de fila ";
std::cout << "\nEleccion: ";
std::cin >> bomba;
if (bomba != 1)
{
printf("Bomba erronea.\n");
continue;
}
std::cout << "X: ";
std::cin >> fila;
bombaFila(tablero, anchura, altura, dificultad, fila);
break;
}
case 2: {
std::cout << "(1) Bomba de fila";
std::cout << "(2) Bomba de columna";
std::cout << "\nEleccion: ";
std::cin >> bomba;
if (bomba < 1 && bomba > 2)
{
printf("Bomba erronea.\n");
continue;
}
switch (bomba) {
case 1:
{
std::cout << "X: ";
std::cin >> fila;
bombaFila(tablero, anchura, altura, dificultad, fila);
break;
}
case 2:
{
std::cout << "Y: ";
std::cin >> columna;
bombaColumna(tablero, anchura, altura, dificultad, columna);
break;
}
}
break;
}
case 3: {
std::cout << "(1) Bomba de fila";
std::cout << "(2) Bomba de columna";
std::cout << "(3) Bomba de rotacion 3x3";
std::cout << "\nEleccion: ";
std::cin >> bomba;
if (bomba < 1 && bomba > 3)
{
printf("Bomba erronea.\n");
continue;
}
switch (bomba) {
case 1:
{
std::cout << "X: ";
std::cin >> fila;
bombaFila(tablero, anchura, altura, dificultad, fila);
break;
}
case 2:
{
std::cout << "Y: ";
std::cin >> columna;
bombaColumna(tablero, anchura, altura, dificultad, columna);
break;
}
case 3:
{
for (int fila = 1; fila < anchura; fila += 3)
{
for (int columna = 1; columna < altura; columna += 3)
{
if ((fila - 1) < 0 || (fila + 1) >= altura || (columna - 1) < 0 || (columna + 1) >= anchura)
{
/* Se entra cuando no se puede rotar */
}
else
{
bombaRotarCPU(tablero, anchura, altura, fila, columna);
}
}
}
break;
}
}
break;
}
}
break;
}
}
}
free(tablero);
return 0;
} | a4f160b029e0584691a73881c9f1d25d899e3ec8.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <cstdlib>
#include <curand.h>
#include <curand_kernel.h>
#include <ctime>
#include <fstream>
//funcion para generar una jewel aleatoria, como la generacion inicial.
int generarJewel(int dificultad) {
srand(time(NULL));
switch (dificultad) {
case 1: {
int randJewel = rand() % 4 + 1;
return randJewel;
}
case 2: {
int randJewel = rand() % 6 + 1;
return randJewel;
}
case 3: {
int randJewel = rand() % 8 + 1;
return randJewel;
}
}
return -1;
}
void generacionInicialRandomJewels(float *tablero, int dificultad, int anchura, int altura) {
srand(time(NULL));
for (int i = 0; i < altura*anchura; i++) {
switch (dificultad) {
case 1: {
int randJewel = rand() % 4 + 1;
tablero[i] = randJewel;
break;
}
case 2: {
int randJewel = rand() % 6 + 1;
tablero[i] = randJewel;
break;
}
case 3: {
int randJewel = rand() % 8 + 1;
tablero[i] = randJewel;
break;
}
}
}
}
void printTablero(float* tablero, int anchura, int altura) {
for (int i = altura - 1; i >= 0; i--) {
printf("\n");
for (int j = 0; j < anchura; j++) {
printf("%d ", (int)tablero[j + i*anchura]);
}
}
printf("\n");
}
void eliminarJewels(float* tablero, float* jewels_eliminadas, int dificultad, int anchura, int altura) {
int max = 0;
if (altura >= anchura) max = altura;
else max = anchura;
int final = 0;
bool modif = false;
//Calcula cual es el ultimo valor escrito de las jewels a eliminar, ya que puede haber posiciones no escritas
for (int i = 0; i < max * 2; i++) {
if (jewels_eliminadas[i] < 0) {
final = i;
modif = true;
break;
}
}
//En caso de que este completamente escrito
if (!modif) final = max * 2;
//printf("\nFinal: %i\n", final);
srand(time(NULL));
if (jewels_eliminadas[0] != jewels_eliminadas[2]) {
for (int y = jewels_eliminadas[1]; y < altura; y++) {
//printf("A");
for (int x = jewels_eliminadas[0]; x <= jewels_eliminadas[final - 2]; x++) {
//printf("\nBUCLE X:%i Y:%i\n", x, y);
if (y + 1 < altura) {
tablero[x + (y)*(anchura)] = tablero[x + (y + 1)*anchura];
switch (dificultad) {
case 1: {
int randJewel = rand() % 4 + 1;
tablero[x + (y+1)*anchura] = randJewel;
break;
}
case 2: {
int randJewel = rand() % 6 + 1;
tablero[x + (y+1)*anchura] = randJewel;
break;
}
case 3: {
int randJewel = rand() % 8 + 1;
tablero[x + (y+1)*anchura] = randJewel;
break;
}
}
}
else {
switch (dificultad) {
case 1: {
int randJewel = rand() % 4 + 1;
tablero[x + y*anchura] = randJewel;
break;
}
case 2: {
int randJewel = rand() % 6 + 1;
tablero[x + y*anchura] = randJewel;
break;
}
case 3: {
int randJewel = rand() % 8 + 1;
tablero[x + y*anchura] = randJewel;
break;
}
}
}
}
}
}else{
int posicion = jewels_eliminadas[0] + jewels_eliminadas[1] * anchura;
float valor = tablero[posicion];
for (int y = jewels_eliminadas[1]; y < altura; y++) {
//printf("A");
for (int x = jewels_eliminadas[0]; x <= jewels_eliminadas[final - 2]; x++) {
//printf("\nBUCLE X:%i Y:%i\n", x, y);
if (y < altura) {
if (y >= jewels_eliminadas[final-2]) {
tablero[x + (y-final/2)*(anchura)] = tablero[x + (y)*anchura];
switch (dificultad) {
case 1: {
int randJewel = rand() % 4 + 1;
tablero[x + (y)*anchura] = randJewel;
break;
}
case 2: {
int randJewel = rand() % 6 + 1;
tablero[x + (y)*anchura] = randJewel;
break;
}
case 3: {
int randJewel = rand() % 8 + 1;
tablero[x + (y)*anchura] = randJewel;
break;
}
}
}
else {
switch (dificultad) {
case 1: {
int randJewel = rand() % 4 + 1;
tablero[x + (y)*anchura] = randJewel;
break;
}
case 2: {
int randJewel = rand() % 6 + 1;
tablero[x + (y)*anchura] = randJewel;
break;
}
case 3: {
int randJewel = rand() % 8 + 1;
tablero[x + (y)*anchura] = randJewel;
break;
}
}
}
}
}
}
}
}
void analisisTableroManual(int dificultad, float* tablero, int anchura, int altura, int x, int y) {
int max = 0;
int size = anchura*altura;
if (altura >= anchura) max = altura;
else max = anchura;
//Solo se eliminan MAX jewels como mucho, se guardan sus x e y
float* jewels_eliminadas = (float*)malloc(2 * max * sizeof(float));
for (int i = 0; i < max; i++) {
jewels_eliminadas[i] = -1;
}
int jewels_posibles_izq = 0;
int jewels_posibles_der = 0;
//printf("\nHORIZONTAL\n");
//Si tiene por la izquierda
if ((x - 1 + y*anchura >= 0) && tablero[x - 1 + y*anchura] == tablero[x + y*anchura]) {
int i = 1;
while ((x - i + y*anchura >= 0) && (x -i>=0) && tablero[x - i + y*anchura] == tablero[x + y*anchura]) {
jewels_posibles_izq++;
i++;
}
}
//Si tiene por la derecha
if ((x + 1 + y*anchura <= size) && tablero[x + 1 + y*anchura] == tablero[x + y*anchura]) {
int i = 1;
while ((x + i + y*anchura <= size) && (x + i < anchura) && tablero[x + i + y*anchura] == tablero[x + y*anchura]) {
jewels_posibles_der++;
i++;
}
}
//Se pueden eliminar horizontalmente
if (1 + jewels_posibles_izq + jewels_posibles_der >= 3) {
int salto = 0;
//printf("\nIZQ:%i DER:%i\n",jewels_posibles_izq,jewels_posibles_der);
for (int j = jewels_posibles_izq; j >= (1); j--) {
jewels_eliminadas[salto] = x - j;
jewels_eliminadas[salto + 1] = y;
salto += 2;
}
jewels_eliminadas[jewels_posibles_izq*2] = x;
jewels_eliminadas[jewels_posibles_izq*2+1] = y;
salto = 2;
for (int k = 1; k <= jewels_posibles_der; k++) {
jewels_eliminadas[salto + jewels_posibles_izq*2] = x + k;
jewels_eliminadas[salto + jewels_posibles_izq*2 + 1] = y;
salto += 2;
}
}
else { //Analizamos la vertical
int jewels_posibles_arrib = 0;
int jewels_posibles_abaj = 0;
//printf("\nVERTICAL\n");
//Si tiene por abajo
if ((x + (y - 1)*anchura >= 0) && tablero[x + (y - 1)*anchura] == tablero[x + y*anchura]) {
printf("\nABAJO\n");
int i = 1;
while ((x + (y - i)*anchura >= 0) && tablero[x + (y - i)*anchura] == tablero[x + y*anchura]) {
jewels_posibles_abaj++;
//printf("\nTIENE ABAJO\n");
i++;
}
}
//Si tiene por arriba
if ((x + 1 + y*anchura <= size) && tablero[x + (y + 1)*anchura] == tablero[x + y*anchura]) {
//printf("\nARRIBA\n");
int i = 1;
while ((x + (y + i)*anchura <= size) && tablero[x + (y + i)*anchura] == tablero[x + y*anchura]) {
jewels_posibles_arrib++;
//printf("\nTIENE ARRIBA\n");
i++;
}
}
//Se pueden eliminar
if (1 + jewels_posibles_abaj + jewels_posibles_arrib >= 3) {
//printf("\nSE PUEDE\n");
int salto = 0;
for (int j = jewels_posibles_abaj; j >= (1); j--) {
jewels_eliminadas[salto] = x;
jewels_eliminadas[salto + 1] = y - j;
salto += 2;
}
jewels_eliminadas[jewels_posibles_abaj*2] = x;
jewels_eliminadas[jewels_posibles_abaj*2+1] = y;
salto = 2;
for (int k = 1; k <= jewels_posibles_arrib; k++) {
jewels_eliminadas[salto + jewels_posibles_abaj*2] = x;
jewels_eliminadas[salto + jewels_posibles_abaj*2 + 1] = y + k;
salto += 2;
}
}
}
/*for (int q = 0; q < 2 * max; q++) {
if (q % 2 != 0) {
printf(" y:%f\n", jewels_eliminadas[q]);
}
else {
printf("| x:%f\n", jewels_eliminadas[q]);
}
}*/
eliminarJewels(tablero, jewels_eliminadas, dificultad, anchura, altura);
free(jewels_eliminadas);
}
void intercambiarPosiciones(float* tablero, int jewel1_x, int jewel1_y, int direccion, int anchura, int altura, int seleccion, int dificultad) {
int jewel2_x = jewel1_x;
int jewel2_y = jewel1_y;
switch (direccion)
{
case 1: //Arriba
{
jewel2_y += 1;
break;
}
case 2: //Abajo
{
jewel2_y -= 1;
break;
}
case 3: //Izquierda
{
jewel2_x -= 1;
break;
}
case 4: //Derecha
{
jewel2_x += 1;
break;
}
}
int aux1;
aux1 = tablero[jewel2_x + jewel2_y*anchura];
tablero[jewel2_x + jewel2_y*anchura] = tablero[jewel1_x + jewel1_y*anchura];
tablero[jewel1_x + jewel1_y*anchura] = aux1;
if (seleccion == 2)
analisisTableroManual(dificultad, tablero, anchura, altura, jewel2_x, jewel2_y);
}
//Funcion CPU. TODO: Arreglar calculo de contiguos, posible fallo al contar
void analisisTableroAutomatico(int dificultad, float* tablero, int anchura, int altura) {
int max = 0;
int size = anchura*altura;
int jewels_posibles_der = 0;
if (altura >= anchura) max = altura;
else max = anchura;
//Solo se eliminan MAX jewels como mucho, se guardan sus x e y
float* jewels_eliminadas = (float*)malloc(2 * max * sizeof(float));
//Tablero auxiliar para la toma del mejor caso
float* aux_tablero = (float*)malloc(size * sizeof(float));
for (int i = 0; i < max; i++) {
jewels_eliminadas[i] = -1;
}
//printf("\nAUTOMATICO\n");
for (int y = 0; y < altura; y++) {
for (int x = 0; x < anchura; x++) {
jewels_posibles_der = 0;
//Si tiene por la derecha
if ((x + 2) < anchura) {
if (((x + 2) + y*anchura <= size) && tablero[x + 2 + y*anchura] == tablero[x + y*anchura]) {
int i = 2;
while ((x + i + y*anchura <= size) && tablero[x + i + y*anchura] == tablero[x + y*anchura]) {
jewels_posibles_der++;
i++;
}
aux_tablero[x + y*anchura] = jewels_posibles_der + 1;
}
else {
aux_tablero[x + y*anchura] = 1;
}
}
else {
aux_tablero[x + y*anchura] = 1;
}
}
}
int x_mejor = 0;
int y_mejor = 0;
int valor_mejor = 0;
for (int y = 0; y < altura; y++) {
for (int x = 0; x < anchura; x++) {
if (aux_tablero[x + y*anchura] > valor_mejor) {
x_mejor = x;
y_mejor = y;
valor_mejor = aux_tablero[x + y*anchura];
}
}
}
intercambiarPosiciones(tablero, x_mejor, y_mejor, 4, anchura, altura, 1, dificultad);
//Se puede eliminar
if (valor_mejor >= 3) {
jewels_eliminadas[0] = x_mejor;
jewels_eliminadas[1] = y_mejor;
int salto = 2;
for (int j = 1; j <= (valor_mejor); j++) {
jewels_eliminadas[salto] = x_mejor + j;
jewels_eliminadas[salto + 1] = y_mejor;
salto += 2;
}
}
eliminarJewels(tablero, jewels_eliminadas, dificultad, anchura, altura);
free(jewels_eliminadas);
free(aux_tablero);
}
bool precargar(int& anchura, int& altura, int& dificultad, char* fichero)
{
std::ifstream fAnchura("anchura.txt");
if (!fAnchura.is_open())
{
std::cout << "ERROR: no existe un archivo guardado." << std::endl;
return false;
}
fAnchura >> anchura;
fAnchura.close();
std::ifstream fAltura("altura.txt");
if (!fAltura.is_open())
{
std::cout << "ERROR: no existe un archivo guardado." << std::endl;
return false;
}
fAltura >> altura;
fAltura.close();
std::ifstream fDificultad("dificultad.txt");
if (!fDificultad.is_open())
{
std::cout << "ERROR: no existe un archivo guardado." << std::endl;
return false;
}
fDificultad >> dificultad;
fDificultad.close();
std::ifstream fCarga(fichero);
if (!fCarga.is_open())
{
std::cout << "ERROR: no existe un archivo guardado." << std::endl;
return false;
}
fCarga.close();
return true;
}
void cargar(int anchura, int altura, float* tablero, char* fichero)
{
int aux;
char* array = (char*)malloc(anchura*altura + 1);
std::ifstream fCarga(fichero);
fCarga.getline(array, anchura*altura + 1);
for (int i = 0; i < anchura*altura; i++)
{
aux = (array[i] - 48);
tablero[i] = (float)aux;
}
free(array);
fCarga.close();
}
void guardado(float* tablero, int anchura, int altura, int dificultad, char* fichero)
{
//Sistema de guardado
std::ofstream ficheroAnchura;
ficheroAnchura.open("Anchura.txt");
ficheroAnchura.clear();
ficheroAnchura << anchura;
ficheroAnchura.close();
std::ofstream ficheroAltura;
ficheroAltura.open("Altura.txt");
ficheroAltura.clear();
ficheroAltura << altura;
ficheroAltura.close();
std::ofstream ficheroDificultad;
ficheroDificultad.open("Dificultad.txt");
ficheroDificultad.clear();
ficheroDificultad << dificultad;
ficheroDificultad.close();
std::ofstream ficheroGuardado;
ficheroGuardado.open(fichero);
ficheroGuardado.clear();
for (int index = 0; index < anchura*altura; index++)
{
ficheroGuardado << tablero[index];
}
ficheroGuardado.close();
}
void bombaFila(float* tablero, int anchura, int altura, int dificultad, int fila) {
for (int iFila = 0; (iFila + fila) < altura; iFila++)
{
for (int iColm = 0; iColm < anchura; iColm++)
{
if ((iFila + fila + 1) < altura)
{
tablero[(iFila + fila)*anchura + iColm] = tablero[(iFila + fila + 1)*altura + iColm];
}
else {
tablero[(iFila + fila)*anchura + iColm] = generarJewel(dificultad);
}
}
}
}
void bombaColumna(float* tablero, int anchura, int altura, int dificultad, int columna) {
for (int iFila = 0; iFila < altura; iFila++)
{
for (int iColm = 0; (columna - iColm) > 0; iColm++)
{
if ((columna - iColm - 1) < 0)
{
tablero[(iFila*anchura) + (columna - iColm)] = generarJewel(dificultad);
}
else {
tablero[(iFila*anchura) + (columna - iColm)] = tablero[(iFila*altura) + (columna - iColm - 1)];
}
}
}
}
void bombaRotarCPU(float* tablero, int anchura, int altura, int fila, int columna)
{
float aux[9];
int index = 0;
for (int iColm = columna - 1; iColm <= columna + 1; iColm++)
{
for (int iFila = fila + 1; iFila >= fila - 1; iFila--)
{
aux[index] = tablero[iFila*anchura + iColm];
index++;
}
}
index = 0;
for (int iFila = 0; iFila < 3; iFila++)
{
for (int iColumna = 0; iColumna < 3; iColumna++)
{
tablero[(iFila + fila - 1)*anchura + (columna - 1) + iColumna] = aux[index];
index++;
}
}
}
int main(int argc, char** argv) {
//Matriz de tamaño variable de floats, un array de Altura*Anchura
int anchura;
int altura;
int dificultad;
char modo;
bool automatico = true;
int size;
char ficheroGuardado[9] = "save.txt";
bool encontrado = false;
int seleccion;
float *tablero;
/* Valores por argumento*/
if (argc == 1)
{
std::cout << "Anchura del tablero: ";
std::cin >> anchura;
std::cout << "Altura del tablero: ";
std::cin >> altura;
std::cout << "Elija dificultad: \n1.-Facil \n2.-Media \n3.-Dificil\n";
std::cin >> dificultad;
std::cout << "Automatico? 1.-SI 2.-NO\n";
std::cin >> seleccion;
}
else
{
modo = argv[1][1];
dificultad = atoi(argv[2]);
anchura = atoi(argv[3]);
altura = atoi(argv[4]);
switch (modo) {
case 'a': {seleccion = 1; break; }
case 'm': {seleccion = 2; break; }
default: printf("Valor no valido.\n"); return -1;
}
}
bool jugando = true;
/* Establecer automatico como modo de juego */
size = anchura*altura;
tablero = (float*)malloc(size * sizeof(float));
//Se inicializa la matriz
generacionInicialRandomJewels(tablero, dificultad, anchura, altura);
//Bucle principal del juego
while (jugando) {
printTablero(tablero, anchura, altura);
int jewel1_x = 0;
int jewel1_y = 0;
int accion = 0;
std::cout << "Acción a realizar:\n";
std::cout << "(1) Intercambiar Jewels\n";
std::cout << "(2) Guardar partida\n";
std::cout << "(3) Cargar partida\n";
std::cout << "(9) Usar una Bomba\n";
std::cout << "(0) Exit\n";
std::cout << "Elija accion: ";
std::cin >> accion;
switch (accion) {
case 0: {
free(tablero);
return 0;
break;
}
case 1: {
if (seleccion == 2)
{
std::cout << "Posicion de la primera jewel a intercambiar (empiezan en 0)\n";
std::cout << "Columna: ";
std::cin >> jewel1_x;
std::cout << "Fila: ";
std::cin >> jewel1_y;
if (!((jewel1_x < anchura) && (jewel1_x >= 0) && (jewel1_y < altura) && (jewel1_y >= 0))) {
printf("Posicion erronea.\n");
continue;
}
int direccion = 0;
std::cout << "Direccion a seguir para intercambio de posiciones: \n 1.-Arriba\n 2.-Abajo\n 3.-Izquierda\n 4.-Derecha\n";
std::cin >> direccion;
if (direccion > 4 && direccion > 1) {
printf("Direccion erronea.\n");
continue;
}
else {
switch (direccion)
{
case 1: //Arriba
{
if (jewel1_y == altura)
{
printf("No se puede realizar el intercambio especificado.\n");
continue;
}
break;
}
case 2: //Abajo
{
if (jewel1_y == 0)
{
printf("No se puede realizar el intercambio especificado.\n");
continue;
}
break;
}
case 3: //Izquierda
{
if (jewel1_x == 0)
{
printf("No se puede realizar el intercambio especificado.\n");
continue;
}
break;
}
case 4: //Derecha
{
if (jewel1_x == anchura - 1)
{
printf("No se puede realizar el intercambio especificado.\n");
continue;
}
break;
}
}
}
/* Intercambiar posiciones */
intercambiarPosiciones(tablero, jewel1_x, jewel1_y, direccion, anchura, altura, seleccion, dificultad);
}
else if (seleccion == 1)
{
/* Analisis automatico */
analisisTableroAutomatico(dificultad, tablero, anchura, altura);
}
break;
}
case 2: {
guardado(tablero, anchura, altura, dificultad, ficheroGuardado);
std::cout << "Guardado correcto.\n";
break;
}
case 3: {
/* Precarga de tablero */
int encontrado = precargar(anchura, altura, dificultad, ficheroGuardado);
size = anchura*altura;
if (encontrado)
{
free(tablero);
tablero = (float*)malloc(size * sizeof(float));
/* Cargar tablero */
cargar(anchura, altura, tablero, ficheroGuardado);
std::cout << "Automatico? 1.-SI 2.-NO\n";
std::cin >> seleccion;
std::cout << "Se ha cargado el Tablero: \n";
}
else {
std::cout << "No existe ninguna partida guardada.\n";
}
break;
}
case 9: {
// Bomba
int bomba = 0;
int fila = 0; int columna = 0;
std::cout << "Elija una bomba:";
/* Bombas por tipo de dificultad */
switch (dificultad) {
case 1: {
std::cout << "(1) Bomba de fila ";
std::cout << "\nEleccion: ";
std::cin >> bomba;
if (bomba != 1)
{
printf("Bomba erronea.\n");
continue;
}
std::cout << "X: ";
std::cin >> fila;
bombaFila(tablero, anchura, altura, dificultad, fila);
break;
}
case 2: {
std::cout << "(1) Bomba de fila";
std::cout << "(2) Bomba de columna";
std::cout << "\nEleccion: ";
std::cin >> bomba;
if (bomba < 1 && bomba > 2)
{
printf("Bomba erronea.\n");
continue;
}
switch (bomba) {
case 1:
{
std::cout << "X: ";
std::cin >> fila;
bombaFila(tablero, anchura, altura, dificultad, fila);
break;
}
case 2:
{
std::cout << "Y: ";
std::cin >> columna;
bombaColumna(tablero, anchura, altura, dificultad, columna);
break;
}
}
break;
}
case 3: {
std::cout << "(1) Bomba de fila";
std::cout << "(2) Bomba de columna";
std::cout << "(3) Bomba de rotacion 3x3";
std::cout << "\nEleccion: ";
std::cin >> bomba;
if (bomba < 1 && bomba > 3)
{
printf("Bomba erronea.\n");
continue;
}
switch (bomba) {
case 1:
{
std::cout << "X: ";
std::cin >> fila;
bombaFila(tablero, anchura, altura, dificultad, fila);
break;
}
case 2:
{
std::cout << "Y: ";
std::cin >> columna;
bombaColumna(tablero, anchura, altura, dificultad, columna);
break;
}
case 3:
{
for (int fila = 1; fila < anchura; fila += 3)
{
for (int columna = 1; columna < altura; columna += 3)
{
if ((fila - 1) < 0 || (fila + 1) >= altura || (columna - 1) < 0 || (columna + 1) >= anchura)
{
/* Se entra cuando no se puede rotar */
}
else
{
bombaRotarCPU(tablero, anchura, altura, fila, columna);
}
}
}
break;
}
}
break;
}
}
break;
}
}
}
free(tablero);
return 0;
} |
9c8f16f4c2b9f1b19079840eb2383c905f1f6ffa.hip | // !!! This is a file automatically generated by hipify!!!
/*********************************************************************
run-cg.cu
Hauptprogramm. Testet Reduktion und ruft cg auf.
**********************************************************************/
#define MAIN_PROGRAM
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "global.h"
#include "geometry.h"
#include "linalg.h"
#include "cg.h"
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
int nBytes, status, N;
double *w, *v, *x;
double iStart, iElaps;
N=32;
int dimx = 256;
int dimy = 1;
if (argc>1)
{
N=atoi(argv[1]);
}
if (argc>3)
{
dimx=atoi(argv[2]);
dimy=atoi(argv[3]);
}
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// Globale Variablen setzen:
// Anzahl der Inneren Punkte in x- und y-Richtung
Nx=N;
Ny=N;
// Gesamtanzahl der Gitterpunkte
npts=(Nx+2)*(Ny+2);
// Aktive Punkte - Array
active_pts();
// Speicherbedarf pro Vektor in Byte
nBytes=npts*sizeof(double);
// Speicher fr Vektoren allozieren
w=(double*)malloc(nBytes);
v=(double*)malloc(nBytes);
// auf Null setzen
memset(w, 0, nBytes);
memset(v, 0, nBytes);
// Aktive Punkte ausgeben
if ((Nx<=16)&&(Ny<=16))
print_active();
random_vector(w);
random_vector(v);
double *d_v, *d_w, *d_x;
CHECK(hipMalloc((void **)&d_v, nBytes));
CHECK(hipMalloc((void **)&d_w, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_v, v, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_w, w, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
block.x=dimx;
block.y=dimy;
block.z=1;
grid.x=(Nx + block.x - 1) / block.x;
grid.y=(Ny + block.y - 1) / block.y;
grid.z=1;
// Test reduction
int Nunroll=8;
if (npts>256 && Nunroll>1)
{
double cpu_sum=0.0;
iStart = seconds();
for (int i = 0; i < npts; i++) cpu_sum += v[i];
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %f\n", iElaps, cpu_sum);
dim3 block2 (256,1);
int nblk = (npts + (block2.x*Nunroll) - 1)/(block2.x*Nunroll);
dim3 grid2 (nblk,1);
CHECK(hipMalloc((void **)&d_x, nblk*sizeof(double)));
CHECK(hipMemset(d_x,0,nblk*sizeof(double)));
x=(double*)malloc(nblk*sizeof(double));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrolling), dim3(grid2), dim3(block2), 0, 0, d_v, d_x, npts);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(x, d_x, nblk * sizeof(double),hipMemcpyDeviceToHost));
double gpu_sum = 0.0;
for (int i = 0; i < grid2.x; i++) gpu_sum += x[i];
printf("gpu Unrolling elapsed %f sec gpu_sum: %f <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid2.x, block2.x);
assert(abs((gpu_sum-cpu_sum)/cpu_sum)<sqrt(npts)*DBL_EPSILON);
}
// Einheitsvektor
memset(v, 0, nBytes);
v[coord2index(Nx/2,Nx/2)]=1.0; // v=0, ausser am Gitterpunkt (Nx/2+1,Ny/2+1)
print_vector("v",v,1);
cg(w,v,1000,1e-10,&status);
print_vector("x",w,0);
free(active);
free(w);
free(v);
return (0);
}
| 9c8f16f4c2b9f1b19079840eb2383c905f1f6ffa.cu | /*********************************************************************
run-cg.cu
Hauptprogramm. Testet Reduktion und ruft cg auf.
**********************************************************************/
#define MAIN_PROGRAM
#include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "global.h"
#include "geometry.h"
#include "linalg.h"
#include "cg.h"
// Test driver: verifies the unrolled GPU reduction against a CPU sum, then
// runs the conjugate-gradient solver on a 2D lattice Laplace problem with a
// unit-vector right-hand side.
// Usage: run-cg [N [dimx dimy]]  (N = interior points per direction,
// dimx/dimy = thread-block shape for the solver kernels).
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
int nBytes, status, N;
double *w, *v, *x;
double iStart, iElaps;
N=32;
int dimx = 256;
int dimy = 1;
if (argc>1)
{
N=atoi(argv[1]);
}
if (argc>3)
{
dimx=atoi(argv[2]);
dimy=atoi(argv[3]);
}
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// Set the global variables:
// number of interior points in the x and y directions
Nx=N;
Ny=N;
// total number of grid points (including the boundary layer)
npts=(Nx+2)*(Ny+2);
// array of active (interior) points
active_pts();
// memory required per vector, in bytes
// NOTE(review): nBytes is int; overflows for very large N — size_t would be safer.
nBytes=npts*sizeof(double);
// allocate memory for the vectors
w=(double*)malloc(nBytes);
v=(double*)malloc(nBytes);
// zero-initialize
memset(w, 0, nBytes);
memset(v, 0, nBytes);
// print the active points (only readable for small grids)
if ((Nx<=16)&&(Ny<=16))
print_active();
random_vector(w);
random_vector(v);
double *d_v, *d_w, *d_x;
CHECK(cudaMalloc((void **)&d_v, nBytes));
CHECK(cudaMalloc((void **)&d_w, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_v, v, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_w, w, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
block.x=dimx;
block.y=dimy;
block.z=1;
grid.x=(Nx + block.x - 1) / block.x;
grid.y=(Ny + block.y - 1) / block.y;
grid.z=1;
// Test reduction
int Nunroll=8;
if (npts>256 && Nunroll>1)
{
// CPU reference sum of v
double cpu_sum=0.0;
iStart = seconds();
for (int i = 0; i < npts; i++) cpu_sum += v[i];
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %f\n", iElaps, cpu_sum);
// each block of 256 threads reduces 256*Nunroll elements into one partial sum
dim3 block2 (256,1);
int nblk = (npts + (block2.x*Nunroll) - 1)/(block2.x*Nunroll);
dim3 grid2 (nblk,1);
CHECK(cudaMalloc((void **)&d_x, nblk*sizeof(double)));
CHECK(cudaMemset(d_x,0,nblk*sizeof(double)));
x=(double*)malloc(nblk*sizeof(double));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrolling<<<grid2, block2>>>(d_v, d_x, npts);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(x, d_x, nblk * sizeof(double),cudaMemcpyDeviceToHost));
// final reduction of the per-block partial sums on the host
double gpu_sum = 0.0;
for (int i = 0; i < grid2.x; i++) gpu_sum += x[i];
printf("gpu Unrolling elapsed %f sec gpu_sum: %f <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid2.x, block2.x);
// NOTE(review): relies on the C++ ::abs(double) overload from <math.h>;
// fabs() would be unambiguous (plain C abs() would truncate to int).
assert(abs((gpu_sum-cpu_sum)/cpu_sum)<sqrt(npts)*DBL_EPSILON);
}
// Unit vector as right-hand side
memset(v, 0, nBytes);
// NOTE(review): second argument is Nx/2 again — harmless while Nx==Ny, but
// Ny/2 was presumably intended; TODO confirm.
v[coord2index(Nx/2,Nx/2)]=1.0; // v=0 everywhere except at grid point (Nx/2+1,Ny/2+1)
print_vector("v",v,1);
cg(w,v,1000,1e-10,&status);
print_vector("x",w,0);
// NOTE(review): d_v, d_w, d_x and x are never freed; reclaimed at process exit.
free(active);
free(w);
free(v);
return (0);
}
|
1881a65aa9c0616b6c2687dd3d2d970b5b7111d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../fixture/benchmark_fixture.hpp"
#include "../synchronization/synchronization.hpp"
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <type_traits>
enum DispatchingType { HOST_DISPATCHING, DEVICE_DISPATCHING, NO_DISPATCHING };
enum FunctorType { BANDWIDTH_BOUND, COMPUTE_BOUND };
template <class NotFloat, FunctorType ft, class DisableNotFloat = void>
struct Functor {
static __device__ NotFloat f(NotFloat x) { return x; }
};
template <class Float, FunctorType ft>
struct Functor<Float, ft, typename std::enable_if_t<std::is_floating_point<Float>::value>> {
static __device__ Float f(Float x)
{
if (ft == BANDWIDTH_BOUND) {
return x + static_cast<Float>(1) - static_cast<Float>(1);
} else {
for (int i = 0; i < 1000; i++) {
x = (x * x + static_cast<Float>(1)) - x * x - static_cast<Float>(1);
}
return x;
}
}
};
constexpr int block_size = 256;
// This is for NO_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void no_dispatching_kernel(T** A, cudf::size_type n_rows, cudf::size_type n_cols)
{
using F = Functor<T, functor_type>;
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < n_rows) {
for (int c = 0; c < n_cols; c++) {
A[c][index] = F::f(A[c][index]);
}
index += blockDim.x * gridDim.x;
}
}
// This is for HOST_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void host_dispatching_kernel(cudf::mutable_column_device_view source_column)
{
using F = Functor<T, functor_type>;
T* A = source_column.data<T>();
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < source_column.size()) {
A[index] = F::f(A[index]);
index += blockDim.x * gridDim.x;
}
}
template <FunctorType functor_type>
struct ColumnHandle {
template <typename ColumnType, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<ColumnType>())>
void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
{
cudf::detail::grid_1d grid_config{source_column.size(), block_size};
int grid_size = grid_config.num_blocks;
// Launch the kernel.
hipLaunchKernelGGL(( host_dispatching_kernel<functor_type, ColumnType>), dim3(grid_size), dim3(block_size), 0, 0, source_column);
}
template <typename ColumnType, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<ColumnType>())>
void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
{
CUDF_FAIL("Invalid type to benchmark.");
}
};
// The following is for DEVICE_DISPATCHING:
// The dispatching is done on device. The loop loops over
// each row (across different columns). Type is dispatched each time
// a column is visited so the total number of dispatching is
// n_rows * n_cols.
template <FunctorType functor_type>
struct RowHandle {
template <typename T, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>())>
__device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
{
using F = Functor<T, functor_type>;
source.data<T>()[index] = F::f(source.data<T>()[index]);
}
template <typename T, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<T>())>
__device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
{
cudf_assert(false && "Unsupported type.");
}
};
// This is for DEVICE_DISPATCHING
template <FunctorType functor_type>
__global__ void device_dispatching_kernel(cudf::mutable_table_device_view source)
{
const cudf::size_type n_rows = source.num_rows();
cudf::size_type index = threadIdx.x + blockIdx.x * blockDim.x;
while (index < n_rows) {
for (cudf::size_type i = 0; i < source.num_columns(); i++) {
cudf::type_dispatcher(
source.column(i).type(), RowHandle<functor_type>{}, source.column(i), index);
}
index += blockDim.x * gridDim.x;
} // while
}
template <FunctorType functor_type, DispatchingType dispatching_type, class T>
void launch_kernel(cudf::mutable_table_view input, T** d_ptr, int work_per_thread)
{
const cudf::size_type n_rows = input.num_rows();
const cudf::size_type n_cols = input.num_columns();
cudf::detail::grid_1d grid_config{n_rows, block_size};
int grid_size = grid_config.num_blocks;
if (dispatching_type == HOST_DISPATCHING) {
// std::vector<cudf::util::cuda::scoped_stream> v_stream(n_cols);
for (int c = 0; c < n_cols; c++) {
auto d_column = cudf::mutable_column_device_view::create(input.column(c));
cudf::type_dispatcher(
d_column->type(), ColumnHandle<functor_type>{}, *d_column, work_per_thread);
}
} else if (dispatching_type == DEVICE_DISPATCHING) {
auto d_table_view = cudf::mutable_table_device_view::create(input);
auto f = device_dispatching_kernel<functor_type>;
// Launch the kernel
hipLaunchKernelGGL(( f), dim3(grid_size), dim3(block_size), 0, 0, *d_table_view);
} else if (dispatching_type == NO_DISPATCHING) {
auto f = no_dispatching_kernel<functor_type, T>;
// Launch the kernel
hipLaunchKernelGGL(( f), dim3(grid_size), dim3(block_size), 0, 0, d_ptr, n_rows, n_cols);
}
}
template <class TypeParam, FunctorType functor_type, DispatchingType dispatching_type>
void type_dispatcher_benchmark(::benchmark::State& state)
{
const auto source_size = static_cast<cudf::size_type>(state.range(1));
const auto n_cols = static_cast<cudf::size_type>(state.range(0));
const auto work_per_thread = static_cast<cudf::size_type>(state.range(2));
auto data = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; });
std::vector<cudf::test::fixed_width_column_wrapper<TypeParam>> source_column_wrappers;
std::vector<cudf::mutable_column_view> source_columns;
for (int i = 0; i < n_cols; ++i) {
source_column_wrappers.push_back(
cudf::test::fixed_width_column_wrapper<TypeParam>(data, data + source_size));
source_columns.push_back(source_column_wrappers[i]);
}
cudf::mutable_table_view source_table{source_columns};
// For no dispatching
std::vector<rmm::device_buffer> h_vec(n_cols);
std::vector<TypeParam*> h_vec_p(n_cols);
std::transform(h_vec.begin(), h_vec.end(), h_vec_p.begin(), [source_size](auto& col) {
col.resize(source_size * sizeof(TypeParam), rmm::cuda_stream_default);
return static_cast<TypeParam*>(col.data());
});
rmm::device_uvector<TypeParam*> d_vec(n_cols, rmm::cuda_stream_default);
if (dispatching_type == NO_DISPATCHING) {
CUDA_TRY(hipMemcpy(
d_vec.data(), h_vec_p.data(), sizeof(TypeParam*) * n_cols, hipMemcpyHostToDevice));
}
// Warm up
launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
CUDA_TRY(hipDeviceSynchronize());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * source_size * n_cols * 2 *
sizeof(TypeParam));
}
class TypeDispatcher : public cudf::benchmark {
};
#define TBM_BENCHMARK_DEFINE(name, TypeParam, functor_type, dispatching_type) \
BENCHMARK_DEFINE_F(TypeDispatcher, name)(::benchmark::State & state) \
{ \
type_dispatcher_benchmark<TypeParam, functor_type, dispatching_type>(state); \
} \
BENCHMARK_REGISTER_F(TypeDispatcher, name) \
->RangeMultiplier(2) \
->Ranges({{1, 8}, {1 << 10, 1 << 26}, {1, 1}}) \
->UseManualTime();
TBM_BENCHMARK_DEFINE(fp64_bandwidth_host, double, BANDWIDTH_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_device, double, BANDWIDTH_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_no, double, BANDWIDTH_BOUND, NO_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_host, double, COMPUTE_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_device, double, COMPUTE_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_no, double, COMPUTE_BOUND, NO_DISPATCHING);
| 1881a65aa9c0616b6c2687dd3d2d970b5b7111d2.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../fixture/benchmark_fixture.hpp"
#include "../synchronization/synchronization.hpp"
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <type_traits>
// Which dispatch strategy the benchmark exercises: per-column host-side
// type_dispatcher, per-element device-side type_dispatcher, or none.
enum DispatchingType { HOST_DISPATCHING, DEVICE_DISPATCHING, NO_DISPATCHING };
// Workload flavor: memory-bandwidth bound vs. arithmetic (compute) bound.
enum FunctorType { BANDWIDTH_BOUND, COMPUTE_BOUND };
// Primary template: non-floating-point element types are passed through
// unchanged, so only float/double columns do real work.
template <class NotFloat, FunctorType ft, class DisableNotFloat = void>
struct Functor {
static __device__ NotFloat f(NotFloat x) { return x; }
};
// SFINAE specialization for floating-point element types.
template <class Float, FunctorType ft>
struct Functor<Float, ft, typename std::enable_if_t<std::is_floating_point<Float>::value>> {
static __device__ Float f(Float x)
{
if (ft == BANDWIDTH_BOUND) {
// One load + one store with negligible arithmetic: stresses bandwidth.
return x + static_cast<Float>(1) - static_cast<Float>(1);
} else {
// Long dependent arithmetic chain whose net effect leaves x unchanged:
// stresses the ALUs without altering the data.
for (int i = 0; i < 1000; i++) {
x = (x * x + static_cast<Float>(1)) - x * x - static_cast<Float>(1);
}
return x;
}
}
};
// Threads per block used by every kernel launch in this benchmark.
constexpr int block_size = 256;
// This is for NO_DISPATCHING
// Grid-stride kernel applying Functor::f in place to every element of all
// n_cols columns; A is a device array of n_cols raw column pointers, all of
// length n_rows.
template <FunctorType functor_type, class T>
__global__ void no_dispatching_kernel(T** A, cudf::size_type n_rows, cudf::size_type n_cols)
{
using F = Functor<T, functor_type>;
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < n_rows) {
for (int c = 0; c < n_cols; c++) {
A[c][index] = F::f(A[c][index]);
}
index += blockDim.x * gridDim.x;
}
}
// This is for HOST_DISPATCHING
// Same grid-stride transform for a single column whose element type T was
// already resolved on the host by cudf::type_dispatcher.
template <FunctorType functor_type, class T>
__global__ void host_dispatching_kernel(cudf::mutable_column_device_view source_column)
{
using F = Functor<T, functor_type>;
T* A = source_column.data<T>();
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < source_column.size()) {
A[index] = F::f(A[index]);
index += blockDim.x * gridDim.x;
}
}
// Host-side dispatch target: launches host_dispatching_kernel instantiated
// for the column's concrete element type. work_per_thread is currently unused.
template <FunctorType functor_type>
struct ColumnHandle {
template <typename ColumnType, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<ColumnType>())>
void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
{
cudf::detail::grid_1d grid_config{source_column.size(), block_size};
int grid_size = grid_config.num_blocks;
// Launch the kernel.
host_dispatching_kernel<functor_type, ColumnType><<<grid_size, block_size>>>(source_column);
}
// Fallback for element types without a device-representable layout.
template <typename ColumnType, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<ColumnType>())>
void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
{
CUDF_FAIL("Invalid type to benchmark.");
}
};
// The following is for DEVICE_DISPATCHING:
// The dispatching is done on device. The loop loops over
// each row (across different columns). Type is dispatched each time
// a column is visited so the total number of dispatching is
// n_rows * n_cols.
// Device-side dispatch target: applies Functor::f to one element of one column.
template <FunctorType functor_type>
struct RowHandle {
template <typename T, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>())>
__device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
{
using F = Functor<T, functor_type>;
source.data<T>()[index] = F::f(source.data<T>()[index]);
}
// Fallback for element types without a device-representable layout.
template <typename T, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<T>())>
__device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
{
cudf_assert(false && "Unsupported type.");
}
};
// This is for DEVICE_DISPATCHING
// Grid-stride over rows; per row and per column one cudf::type_dispatcher call
// happens on the device (the expensive part this benchmark measures).
template <FunctorType functor_type>
__global__ void device_dispatching_kernel(cudf::mutable_table_device_view source)
{
const cudf::size_type n_rows = source.num_rows();
cudf::size_type index = threadIdx.x + blockIdx.x * blockDim.x;
while (index < n_rows) {
for (cudf::size_type i = 0; i < source.num_columns(); i++) {
cudf::type_dispatcher(
source.column(i).type(), RowHandle<functor_type>{}, source.column(i), index);
}
index += blockDim.x * gridDim.x;
} // while
}
// Runs one full pass of the selected dispatch strategy over `input`.
// d_ptr is only read in the NO_DISPATCHING branch (device array of raw column
// pointers); work_per_thread is forwarded to the host dispatch path.
template <FunctorType functor_type, DispatchingType dispatching_type, class T>
void launch_kernel(cudf::mutable_table_view input, T** d_ptr, int work_per_thread)
{
const cudf::size_type n_rows = input.num_rows();
const cudf::size_type n_cols = input.num_columns();
cudf::detail::grid_1d grid_config{n_rows, block_size};
int grid_size = grid_config.num_blocks;
if (dispatching_type == HOST_DISPATCHING) {
// std::vector<cudf::util::cuda::scoped_stream> v_stream(n_cols);
// One host-side type dispatch (and one kernel launch) per column.
for (int c = 0; c < n_cols; c++) {
auto d_column = cudf::mutable_column_device_view::create(input.column(c));
cudf::type_dispatcher(
d_column->type(), ColumnHandle<functor_type>{}, *d_column, work_per_thread);
}
} else if (dispatching_type == DEVICE_DISPATCHING) {
auto d_table_view = cudf::mutable_table_device_view::create(input);
auto f = device_dispatching_kernel<functor_type>;
// Launch the kernel
f<<<grid_size, block_size>>>(*d_table_view);
} else if (dispatching_type == NO_DISPATCHING) {
auto f = no_dispatching_kernel<functor_type, T>;
// Launch the kernel
f<<<grid_size, block_size>>>(d_ptr, n_rows, n_cols);
}
}
// Benchmark driver: builds n_cols columns of source_size sequential values,
// runs one warm-up pass, then times launch_kernel once per benchmark
// iteration with a CUDA-event timer. Bytes processed counts one read plus
// one write per element.
template <class TypeParam, FunctorType functor_type, DispatchingType dispatching_type>
void type_dispatcher_benchmark(::benchmark::State& state)
{
const auto source_size = static_cast<cudf::size_type>(state.range(1));
const auto n_cols = static_cast<cudf::size_type>(state.range(0));
const auto work_per_thread = static_cast<cudf::size_type>(state.range(2));
auto data = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; });
std::vector<cudf::test::fixed_width_column_wrapper<TypeParam>> source_column_wrappers;
std::vector<cudf::mutable_column_view> source_columns;
for (int i = 0; i < n_cols; ++i) {
source_column_wrappers.push_back(
cudf::test::fixed_width_column_wrapper<TypeParam>(data, data + source_size));
source_columns.push_back(source_column_wrappers[i]);
}
cudf::mutable_table_view source_table{source_columns};
// For no dispatching
// Raw per-column device buffers plus a device array of their pointers,
// since the NO_DISPATCHING kernel bypasses cudf views entirely.
std::vector<rmm::device_buffer> h_vec(n_cols);
std::vector<TypeParam*> h_vec_p(n_cols);
std::transform(h_vec.begin(), h_vec.end(), h_vec_p.begin(), [source_size](auto& col) {
col.resize(source_size * sizeof(TypeParam), rmm::cuda_stream_default);
return static_cast<TypeParam*>(col.data());
});
rmm::device_uvector<TypeParam*> d_vec(n_cols, rmm::cuda_stream_default);
if (dispatching_type == NO_DISPATCHING) {
CUDA_TRY(cudaMemcpy(
d_vec.data(), h_vec_p.data(), sizeof(TypeParam*) * n_cols, cudaMemcpyHostToDevice));
}
// Warm up
launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
CUDA_TRY(cudaDeviceSynchronize());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * source_size * n_cols * 2 *
sizeof(TypeParam));
}
// Google Benchmark fixture; cudf::benchmark provides the memory-resource setup.
class TypeDispatcher : public cudf::benchmark {
};
// Defines and registers one fixture benchmark. Ranges are, in order:
// {n_cols 1..8, source_size 2^10..2^26, work_per_thread (fixed at 1)}.
#define TBM_BENCHMARK_DEFINE(name, TypeParam, functor_type, dispatching_type) \
BENCHMARK_DEFINE_F(TypeDispatcher, name)(::benchmark::State & state) \
{ \
type_dispatcher_benchmark<TypeParam, functor_type, dispatching_type>(state); \
} \
BENCHMARK_REGISTER_F(TypeDispatcher, name) \
->RangeMultiplier(2) \
->Ranges({{1, 8}, {1 << 10, 1 << 26}, {1, 1}}) \
->UseManualTime();
// One benchmark per (workload flavor x dispatch strategy), on double columns.
TBM_BENCHMARK_DEFINE(fp64_bandwidth_host, double, BANDWIDTH_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_device, double, BANDWIDTH_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_no, double, BANDWIDTH_BOUND, NO_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_host, double, COMPUTE_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_device, double, COMPUTE_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_no, double, COMPUTE_BOUND, NO_DISPATCHING);
|
3bd8cce254b42d3c627feb271b2122255fd3e066.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<mykernels.h>
#include <cstdio>
#include<cmath>
int dirArr[]={0,0,0,-1,0,1,-1,0,-1,-1,-1,1,1,0,1,-1,1,1};
int revArr[]={0, 2, 1, 6, 8, 7, 3, 5, 4};
float wArr[]={4.f/9.f,1.f/9.f,1.f/9.f,1.f/9.f,1.f/36.f,1.f/36.f,1.f/9.f,1.f/36.f,1.f/36.f};
__constant__ int cdirArr [18], crevArr[9];
__constant__ float cwArr[9];
__global__ void npRoll(float * arr1, float * arr2, int w, int h,int stepx, int stepy){
int i =blockIdx.x;
arr2[i*w+threadIdx.x] = arr1[((i+stepy+h)%h)*w + (threadIdx.x+stepx+w)%w];
}
__global__ void equilibrium(float * rho, float * vel,float * feq){
const int b = blockIdx.x, t = threadIdx.x;
float v;
float usq =vel[b *_Nx + t] * vel[b *_Nx + t] + vel[_N+ b *_Nx + t]*vel[_N+ b *_Nx + t];
#pragma unroll
for(int i =0;i< 9;++i){
v=cdirArr[2*i] *vel[b *_Nx + t] + cdirArr[2*i+1] *vel[_N+ b *_Nx + t];
feq[i*_N+ b *_Nx + t]= rho[b *_Nx + t] * cwArr[i] *(1.f + 3* v +4.5f*v*v- 1.5f*usq);
}
}
__global__ void equilibriumInit(float * feq){
const int b = blockIdx.x, t = threadIdx.x;
float v, vX = _ULB *(1.0+1e-2f*sin((3*(float)blockIdx.x)/(_Ny-1)*M_PI));
#pragma unroll
for(int i =0;i< 9;++i){
v=cdirArr[2*i] *vX;
feq[i*_N+ b *_Nx + t]= cwArr[i] *(1.f + 3* v +4.5f*v*v- 1.5f*vX*vX);
}
}
__global__ void findRhoAndMomentum(float* fin, float * rho, float* momentum){
const int b = blockIdx.x, t = threadIdx.x;
float r=0, m1=0,m2=0;
#pragma unroll
for(int i =0; i< 9;++i){
r+=fin[i*_N+b*_Nx+t];
m1 += fin[i*_N+b*_Nx+t]*cdirArr[2*i];
m2 += fin[i*_N+b*_Nx+t]*cdirArr[2*i+1];
}
momentum[_N+b*_Nx+t] = m2/r;
momentum[b*_Nx+t] = m1/r;
rho[b*_Nx+t] = r;
//rho[b*blockDim.x +t]=
}
__global__ void findFout(float* fin, float * feq, float* fout){
const int b = blockIdx.x, t = threadIdx.x;
#pragma unroll
for(int i =0;i< 9;++i){
fout[i * _N+b* _Nx+t] = fin[i * _N+b * _Nx+t] - (fin[i*_N+b*_Nx+t] -feq[i*_N+b*_Nx+t])/ _TAU;
}
}
__global__ void writeZeros(float* arr){
const int b = blockIdx.x, t = threadIdx.x;
arr[b*blockDim.x + t] =0;
}
__global__ void applyObstacle(float * fin, float *fout){
const int i =blockIdx.x, j = threadIdx.x;
if(( (i-_Cy)*(i-_Cy) + (j-_Cx)*(j-_Cx) < _RAD* _RAD)){
#pragma unroll
for(int k =0; k <9 ;++k){
fin[k*_N + i * _Nx + j]=fout[crevArr[k]*_N + i * _Nx + j];
}
}
}
__global__ void findResult(float * momentum, float * result){
// const int i =blockIdx.x, j = threadIdx.x;
}
__global__ void prepareResult(float* momentum, float*cresult){
const int b =blockIdx.x, t = threadIdx.x;
cresult[b*_Nx+t] = sqrt(momentum[_N+b*_Nx+t] * momentum[_N+b*_Nx+t] +momentum[b*_Nx+t] * momentum[b*_Nx+t]);
}
__global__ void copyFirstRow(float *fin, float * feq){
#pragma unroll
for(int a =0 ;a <9;++a){
fin[a*_N + threadIdx.x*_Nx] =feq[a*_N + threadIdx.x*_Nx] ;
}
}
void allocConstants(){
hipMemcpyToSymbol (cdirArr,dirArr, 18*4 );
hipMemcpyToSymbol (crevArr,revArr, 9*4 );
hipMemcpyToSymbol (cwArr,wArr, 9*4 );
} | 3bd8cce254b42d3c627feb271b2122255fd3e066.cu | #include<mykernels.h>
#include <cstdio>
#include<cmath>
// D2Q9 lattice-Boltzmann tables (host copies): 9 (x,y) direction vectors,
// the bounce-back (opposite-direction) index for each direction, and the
// standard D2Q9 weights 4/9, 1/9, 1/36.
int dirArr[]={0,0,0,-1,0,1,-1,0,-1,-1,-1,1,1,0,1,-1,1,1};
int revArr[]={0, 2, 1, 6, 8, 7, 3, 5, 4};
float wArr[]={4.f/9.f,1.f/9.f,1.f/9.f,1.f/9.f,1.f/36.f,1.f/36.f,1.f/9.f,1.f/36.f,1.f/36.f};
// Device __constant__ copies of the tables above; filled by allocConstants().
__constant__ int cdirArr [18], crevArr[9];
__constant__ float cwArr[9];
// numpy.roll-style periodic shift of a w*h array by (stepx, stepy).
// Indexing implies one block per row, one thread per column.
// NOTE(review): the "+w"/"+h" before the modulo only handles shifts of
// magnitude <= w (resp. h).
__global__ void npRoll(float * arr1, float * arr2, int w, int h,int stepx, int stepy){
int i =blockIdx.x;
arr2[i*w+threadIdx.x] = arr1[((i+stepy+h)%h)*w + (threadIdx.x+stepx+w)%w];
}
// Equilibrium populations feq for all 9 directions from density rho and the
// two velocity planes (x-component in vel[0.._N), y-component in vel[_N..2_N)).
// Indexing b*_Nx + t implies blockDim.x == _Nx and gridDim.x == _Ny — confirm
// against the launch site.
__global__ void equilibrium(float * rho, float * vel,float * feq){
const int b = blockIdx.x, t = threadIdx.x;
float v;
float usq =vel[b *_Nx + t] * vel[b *_Nx + t] + vel[_N+ b *_Nx + t]*vel[_N+ b *_Nx + t];
#pragma unroll
for(int i =0;i< 9;++i){
v=cdirArr[2*i] *vel[b *_Nx + t] + cdirArr[2*i+1] *vel[_N+ b *_Nx + t];
feq[i*_N+ b *_Nx + t]= rho[b *_Nx + t] * cwArr[i] *(1.f + 3* v +4.5f*v*v- 1.5f*usq);
}
}
// Initial equilibrium for unit density with a horizontal velocity
// _ULB*(1 + 1e-2*sin(...)) — a small row-dependent perturbation (presumably
// to break symmetry in the flow).
__global__ void equilibriumInit(float * feq){
const int b = blockIdx.x, t = threadIdx.x;
float v, vX = _ULB *(1.0+1e-2f*sin((3*(float)blockIdx.x)/(_Ny-1)*M_PI));
#pragma unroll
for(int i =0;i< 9;++i){
v=cdirArr[2*i] *vX;
feq[i*_N+ b *_Nx + t]= cwArr[i] *(1.f + 3* v +4.5f*v*v- 1.5f*vX*vX);
}
}
// Macroscopic moments per site: density r = sum_i f_i, and velocity
// (momentum / density) written into the two planes of `momentum`.
__global__ void findRhoAndMomentum(float* fin, float * rho, float* momentum){
const int b = blockIdx.x, t = threadIdx.x;
float r=0, m1=0,m2=0;
#pragma unroll
for(int i =0; i< 9;++i){
r+=fin[i*_N+b*_Nx+t];
m1 += fin[i*_N+b*_Nx+t]*cdirArr[2*i];
m2 += fin[i*_N+b*_Nx+t]*cdirArr[2*i+1];
}
momentum[_N+b*_Nx+t] = m2/r;
momentum[b*_Nx+t] = m1/r;
rho[b*_Nx+t] = r;
//rho[b*blockDim.x +t]=
}
// BGK collision step: relax populations towards equilibrium with time _TAU.
__global__ void findFout(float* fin, float * feq, float* fout){
const int b = blockIdx.x, t = threadIdx.x;
#pragma unroll
for(int i =0;i< 9;++i){
fout[i * _N+b* _Nx+t] = fin[i * _N+b * _Nx+t] - (fin[i*_N+b*_Nx+t] -feq[i*_N+b*_Nx+t])/ _TAU;
}
}
// Zero-fill a (gridDim.x x blockDim.x)-sized array.
__global__ void writeZeros(float* arr){
const int b = blockIdx.x, t = threadIdx.x;
arr[b*blockDim.x + t] =0;
}
// Bounce-back obstacle: inside the circle of radius _RAD centered at
// (_Cx, _Cy), each population is replaced by the post-collision population of
// the opposite direction (crevArr).
__global__ void applyObstacle(float * fin, float *fout){
const int i =blockIdx.x, j = threadIdx.x;
if(( (i-_Cy)*(i-_Cy) + (j-_Cx)*(j-_Cx) < _RAD* _RAD)){
#pragma unroll
for(int k =0; k <9 ;++k){
fin[k*_N + i * _Nx + j]=fout[crevArr[k]*_N + i * _Nx + j];
}
}
}
// Unused stub.
__global__ void findResult(float * momentum, float * result){
// const int i =blockIdx.x, j = threadIdx.x;
}
// Velocity magnitude per site (for output/visualization).
__global__ void prepareResult(float* momentum, float*cresult){
const int b =blockIdx.x, t = threadIdx.x;
cresult[b*_Nx+t] = sqrt(momentum[_N+b*_Nx+t] * momentum[_N+b*_Nx+t] +momentum[b*_Nx+t] * momentum[b*_Nx+t]);
}
// Copies the equilibrium populations into the first element of each row
// (the x == 0 column, despite the function's name) — re-imposes the inflow.
__global__ void copyFirstRow(float *fin, float * feq){
#pragma unroll
for(int a =0 ;a <9;++a){
fin[a*_N + threadIdx.x*_Nx] =feq[a*_N + threadIdx.x*_Nx] ;
}
}
/**
 * Upload the D2Q9 lattice tables (direction vectors, bounce-back indices and
 * weights) into device __constant__ memory. Must run once before any kernel
 * that reads cdirArr, crevArr or cwArr.
 */
void allocConstants(){
// sizeof(array) replaces the old hard-coded "count * 4" byte counts, which
// silently assumed 4-byte int/float elements; the values are identical on
// all CUDA platforms, but this stays correct if the tables ever change.
cudaMemcpyToSymbol (cdirArr,dirArr, sizeof(dirArr) );
cudaMemcpyToSymbol (crevArr,revArr, sizeof(revArr) );
cudaMemcpyToSymbol (cwArr,wArr, sizeof(wArr) );
}
8c6329b1896e278eb1469f60159e86828b6b82c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <iostream>
#define _USE_MATH_DEFINES
#include <math.h>
#include <random>
#include <stdio.h>
#include <vector>
#include "device.h"
/** Good ol' MIN macro */
#define MIN(a, b) ((a) < (b) ? (a) : (b))
// Draws one sample from a normal distribution N(mean, sigma) using the
// caller-supplied Mersenne Twister engine.  A fresh distribution object is
// built per call (same as before), so the sample stream depends only on rng.
static float gaussian_noise(float mean, float sigma, std::mt19937 &rng)
{
return std::normal_distribution<float>(mean, sigma)(rng);
}
// Density of an axis-aligned (rho == 0) bivariate Gaussian at (x, y).
// The squared deviation terms are divided by 1000 "for numerical stability",
// matching the device-side copy in kernel_calculate_likelihood — so the
// result is a flattened score, not a true normalized pdf.
static float probability_of_value_from_bivariate_gaussian(float x, float y, float mean_x, float mean_y, float sigma_x, float sigma_y)
{
const float rho = 0.0; // independent x/y => zero correlation
float dx = x - mean_x;
float dy = y - mean_y;
float denom = 2.0 * M_PI * sigma_x * sigma_y * sqrt(1.0 - (rho * rho));
float quad_x = (dx * dx) / (sigma_x * sigma_x);
float cross = ((2.0 * rho * dx * dy) / (sigma_x * sigma_y));
float quad_y = (dy * dy) / (sigma_y * sigma_y);
quad_x /= 1000.0; // For numerical stability
quad_y /= 1000.0; // Ditto
float mahal = quad_x - cross + quad_y;
float expo = (-1.0 * mahal) / (2.0 * (1.0 - rho * rho));
return exp(expo) / denom;
}
// Per-particle measurement-likelihood kernel: weights[i] becomes the bivariate
// Gaussian density of particle i's (x, y) position around (estimate_x,
// estimate_y).  Device-side mirror of
// probability_of_value_from_bivariate_gaussian() with rho fixed to 0 and
// sigma_x = sigma_y = 2.5 — keep the two in sync.
__global__ void kernel_calculate_likelihood(int *particles_x, int *particles_y, float *weights, unsigned int nparticles, float estimate_x, float estimate_y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nparticles)
{
float x = (float)particles_x[index];
float y = (float)particles_y[index];
const float sigma_x = 2.5;
const float sigma_y = 2.5;
float mean_x = estimate_x;
float mean_y = estimate_y;
// Compute the probability of getting this x,y combo from a distribution centered at estimate_x, estimate_y.
const float rho = 0.0; // cov / (sig1 * sig2); Covariance of two independent random variables is zero.
// NOTE(review): M_PI, sqrt and exp are double precision, so this math is
// promoted to double on device; sqrtf/expf would keep it in float.
float denom = 2.0f * M_PI * sigma_x * sigma_y * sqrt(1.0f - (rho * rho));
float A = ((x - mean_x) * (x - mean_x)) / (sigma_x * sigma_x);
float B = ((2.0f * rho * (x - mean_x) * (y - mean_y)) / (sigma_x * sigma_y));
float C = ((y - mean_y) * (y - mean_y)) / (sigma_y * sigma_y);
A /= 1000.0f; // For numerical stability
C /= 1000.0f; // Ditto
float z = A - B + C;
float a = (-1.0f * z) / (2.0f * (1.0f - rho * rho));
float prob = exp(a) / denom;
weights[index] = prob;
}
}
// Host wrapper: copies the particle positions to the device, runs
// kernel_calculate_likelihood over nparticles items with nthreads_per_block
// threads per block, and copies the resulting weights back into `weights`.
// Returns 0 (hipSuccess) on success.
// NOTE(review): on any failure the macro below overwrites err with __LINE__
// and jumps to fail:, where the assert aborts the process — the non-zero
// return is effectively unreachable, and any device buffers already
// allocated are leaked.  The success path also falls through the fail: label.
int device_calculate_likelihood(const int *particles_x, const int *particles_y, int estimate_x, int estimate_y, float *weights, unsigned int nparticles, int nthreads_per_block)
{
hipError_t err;
int *dev_particles_x = nullptr;
int *dev_particles_y = nullptr;
float *dev_weights = nullptr;
#define CHECK_CUDA_ERR(err) do { if (err != hipSuccess) { err = (hipError_t)__LINE__; goto fail; }} while (0)
/* Malloc all the device memory we need */
err = hipMalloc(&dev_particles_x, nparticles * sizeof(int));
CHECK_CUDA_ERR(err);
err = hipMalloc(&dev_particles_y, nparticles * sizeof(int));
CHECK_CUDA_ERR(err);
err = hipMalloc(&dev_weights, nparticles * sizeof(float));
CHECK_CUDA_ERR(err);
/* Copy arrays onto device */
err = hipMemcpy(dev_particles_x, particles_x, nparticles * sizeof(int), hipMemcpyHostToDevice);
CHECK_CUDA_ERR(err);
err = hipMemcpy(dev_particles_y, particles_y, nparticles * sizeof(int), hipMemcpyHostToDevice);
CHECK_CUDA_ERR(err);
err = hipMemcpy(dev_weights, weights, nparticles * sizeof(float), hipMemcpyHostToDevice);
CHECK_CUDA_ERR(err);
/* Call the kernel */
hipLaunchKernelGGL(( kernel_calculate_likelihood), dim3(ceil(nparticles / (float)nthreads_per_block)), dim3(nthreads_per_block), 0, 0, dev_particles_x, dev_particles_y, dev_weights, nparticles, estimate_x, estimate_y);
err = hipDeviceSynchronize();
CHECK_CUDA_ERR(err);
/* Copy array back onto host */
err = hipMemcpy(weights, dev_weights, nparticles * sizeof(float), hipMemcpyDeviceToHost);
CHECK_CUDA_ERR(err);
/* Deallocate the device arrays */
err = hipFree(dev_particles_x);
CHECK_CUDA_ERR(err);
err = hipFree(dev_particles_y);
CHECK_CUDA_ERR(err);
err = hipFree(dev_weights);
CHECK_CUDA_ERR(err);
#undef CHECK_CUDA_ERR
fail:
assert(err == hipSuccess);
return (int)err;
}
// Motion update: displace every particle by the estimated velocity plus
// Gaussian noise (sigma = 2.5), and set its weight to the probability of the
// noisy velocity it was given under that same Gaussian motion model.
// NOTE(review): positions are int, so the float displacements are truncated
// toward zero on assignment — sub-pixel motion is lost.
static void move_particles(int estimated_vx, int estimated_vy, unsigned int nparticles, int *particles_x, int *particles_y, float *particles_weights, std::mt19937 &rng)
{
for (unsigned int i = 0; i < nparticles; i++)
{
static const float sigma = 2.5;
float vx = gaussian_noise(estimated_vx, sigma, rng);
float vy = gaussian_noise(estimated_vy, sigma, rng);
particles_x[i] += vx;
particles_y[i] += vy;
particles_weights[i] = probability_of_value_from_bivariate_gaussian(vx, vy, estimated_vx, estimated_vy, sigma, sigma);
}
}
// Permutes the three parallel arrays (weights, x, y) into weight order using
// the SortIndices comparator (defined elsewhere; per the call-site comment in
// resample_particles the order is heaviest-first — TODO confirm).
// `indices` must hold a permutation of [0, nparticles); it is sorted in place
// and then used to gather from temporary copies of the three arrays.
// NOTE(review): the malloc() results are not checked for NULL.
static void sort_particles_by_weight_in_place(unsigned int *indices, unsigned int nparticles, float *particles_weights, int *particles_x, int *particles_y)
{
// Sort the indices
std::sort(indices, indices + nparticles, SortIndices(particles_weights));
// Make copies of the three arrays (gross)
int *xcpy = (int *)malloc(sizeof(int) * nparticles);
int *ycpy = (int *)malloc(sizeof(int) * nparticles);
float *wcpy = (float *)malloc(sizeof(float) * nparticles);
memcpy(xcpy, particles_x, sizeof(int) * nparticles);
memcpy(ycpy, particles_y, sizeof(int) * nparticles);
memcpy(wcpy, particles_weights, sizeof(float) * nparticles);
// Sort each array according to the sorted indices
for (unsigned int i = 0; i < nparticles; i++)
{
particles_weights[i] = wcpy[indices[i]];
particles_x[i] = xcpy[indices[i]];
particles_y[i] = ycpy[indices[i]];
}
free(xcpy);
free(ycpy);
free(wcpy);
xcpy = nullptr;
ycpy = nullptr;
wcpy = nullptr;
}
// Rescales the particle weights in place so they sum to 1.
// If the total mass is not positive there is nothing meaningful to divide by,
// so the weights are left untouched.
static void normalize_weights(unsigned int nparticles, float *particles_weights)
{
float total = 0.0;
for (unsigned int idx = 0; idx < nparticles; idx++)
{
total += particles_weights[idx];
}
if (!(total > 0.0))
{
return; // zero (or invalid) mass: avoid division by zero
}
for (unsigned int idx = 0; idx < nparticles; idx++)
{
particles_weights[idx] /= total;
assert((particles_weights[idx] >= 0.0) && (particles_weights[idx] <= 1.0));
}
}
// Combined resample + motion step: draws nparticles new particles from the
// current weight distribution via inverse-CMF sampling, then applies the same
// noisy motion/weighting as move_particles().  Assumes the weights are
// already normalized (the CMF is treated as reaching ~1).
// NOTE(review): the two uniform_int_distributions are default-constructed, so
// their range is [0, numeric_limits<result_type>::max()] — almost certainly
// meant to be bounded by the frame width/height; TODO confirm.
// NOTE(review): the per-draw linear scan of the CMF makes this O(n^2);
// std::upper_bound on the sorted cmf vector would be O(n log n).
static void complete_resample_and_move_step(unsigned int nparticles, float *particles_weights, std::mt19937 &rng, unsigned int *indices, int *particles_x, int *particles_y, int estimated_vx, int estimated_vy)
{
// Create a distribution I will need
auto dist = std::uniform_real_distribution<float>(0.0, 1.0)
;
std::uniform_int_distribution<std::mt19937::result_type> height_distribution;
std::uniform_int_distribution<std::mt19937::result_type> width_distribution;
// Create the new particles in vectors
std::vector<int> pxs;
std::vector<int> pys;
// Sort the particles by weight (in reverse - heaviest at the front of the array)
//sort_particles_by_weight_in_place(indices, nparticles, particles_weights, particles_x, particles_y);
// Align a CMF (cumulative mass function) array, where each bin is the sum of all previous weights
std::vector<float> cmf;
float acc_prob_mass = 0.0;
for (unsigned int i = 0; i < nparticles; i++)
{
acc_prob_mass += particles_weights[i];
cmf.push_back(acc_prob_mass);
}
// Do a search into the CMF to find the place where our randomly generated probability (0 to 1) fits
for (unsigned int i = 0; i < nparticles; i++)
{
float p = dist(rng);
assert((p <= 1.0) && (p >= 0.0));
int cmf_index = -1;
for (unsigned int j = 0; j < nparticles; j++)
{
// Search for where the generated probability belongs
if (p <= cmf[j])
{
cmf_index = j;
break;
}
}
if (cmf_index >= 0)
{
pxs.push_back(particles_x[cmf_index]);
pys.push_back(particles_y[cmf_index]);
}
else
{
// Probabilities are all zero. Resample from uniform.
pxs.push_back(width_distribution(rng));
pys.push_back(height_distribution(rng));
}
}
// Now overwrite the current batch of particles with the new ones
for (unsigned int i = 0; i < nparticles; i++)
{
particles_x[i] = pxs[i];
particles_y[i] = pys[i];
}
// Reset all weights
for (unsigned int i = 0; i < nparticles; i++)
{
particles_weights[i] = 0.0;
}
// Move particles (duplicates move_particles(); positions truncate to int)
for (unsigned int i = 0; i < nparticles; i++)
{
static const float sigma = 2.5;
float vx = gaussian_noise(estimated_vx, sigma, rng);
float vy = gaussian_noise(estimated_vy, sigma, rng);
particles_x[i] += vx;
particles_y[i] += vy;
particles_weights[i] = probability_of_value_from_bivariate_gaussian(vx, vy, estimated_vx, estimated_vy, sigma, sigma);
}
}
// Importance resampling (host): normalize the weights, sort particles
// heaviest-first, then draw nparticles replacements with probability
// proportional to weight via a linear search of the cumulative mass
// function. Falls back to a uniform redraw when no CMF bin reaches the
// drawn probability (e.g. all weights zero). Weights are left normalized.
// NOTE(review): default-constructed height/width distributions span
// [0, mt19937's max value]; presumably they should be bounded by the
// image dimensions -- confirm.
static void resample_particles(unsigned int nparticles, float *particles_weights, std::mt19937 &rng, unsigned int *indices, int *particles_x, int *particles_y)
{
// Create a distribution I will need
auto dist = std::uniform_real_distribution<float>(0.0, 1.0);
std::uniform_int_distribution<std::mt19937::result_type> height_distribution;
std::uniform_int_distribution<std::mt19937::result_type> width_distribution;
// Create the new particles in vectors
std::vector<int> pxs;
std::vector<int> pys;
// Normalize the weights so that each one is between 0 and 1
normalize_weights(nparticles, particles_weights);
// Sort the particles by weight (in reverse - heaviest at the front of the array)
sort_particles_by_weight_in_place(indices, nparticles, particles_weights, particles_x, particles_y);
// Align a CMF (cumulative mass function) array, where each bin is the sum of all previous weights
std::vector<float> cmf;
float acc_prob_mass = 0.0;
for (unsigned int i = 0; i < nparticles; i++)
{
acc_prob_mass += particles_weights[i];
cmf.push_back(acc_prob_mass);
}
// Do a search into the CMF to find the place where our randomly generated probability (0 to 1) fits
for (unsigned int i = 0; i < nparticles; i++)
{
float p = dist(rng);
assert((p <= 1.0) && (p >= 0.0));
int cmf_index = -1;
// O(nparticles) scan; the CMF is nondecreasing, so a binary search
// would also work here.
for (unsigned int j = 0; j < nparticles; j++)
{
// Search for where the generated probability belongs
if (p <= cmf[j])
{
cmf_index = j;
break;
}
}
if (cmf_index >= 0)
{
pxs.push_back(particles_x[cmf_index]);
pys.push_back(particles_y[cmf_index]);
}
else
{
// Probabilities are all zero. Resample from uniform.
pxs.push_back(width_distribution(rng));
pys.push_back(height_distribution(rng));
}
}
// Now overwrite the current batch of particles with the new ones
for (unsigned int i = 0; i < nparticles; i++)
{
particles_x[i] = pxs[i];
particles_y[i] = pys[i];
}
}
// Block-level inclusive prefix sum over the particle weights; each block
// publishes its total weight (the prefix at its last in-range particle)
// into intermediate[blockIdx.x]. The host then sums the per-block totals.
// Launch: 1-D grid, dynamic shared memory of at least blockDim.x floats.
// BUG FIX: the previous scan read tmp[threadIdx.x - stride] while the
// owning thread could be concurrently updating that slot in the same
// iteration (a classic Hillis-Steele shared-memory race). We now read
// into a register, barrier, then write. The shared tail is also
// zero-filled so every slot is initialized and all barriers are reached
// by every thread in the block (no divergent __syncthreads).
__global__ void kernel_normalize_weights_reduction(unsigned int nparticles, float *dev_weights, float *intermediate)
{
    // Dynamically-sized shared memory buffer (>= blockDim.x floats).
    extern __shared__ float tmp[];
    unsigned int index = blockDim.x * blockIdx.x + threadIdx.x;
    // Load this block's weights; pad out-of-range slots with 0 so they
    // cannot perturb the prefix sums of the valid entries.
    tmp[threadIdx.x] = (index < nparticles) ? dev_weights[index] : 0.0f;
    __syncthreads();
    // Hillis-Steele inclusive scan within the block. Strides >= blockDim.x
    // can never satisfy threadIdx.x >= stride, so the loop bound is
    // blockDim.x rather than nparticles (same result, fewer iterations).
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
    {
        float partial = 0.0f;
        if (threadIdx.x >= stride)
        {
            partial = tmp[threadIdx.x - stride];
        }
        __syncthreads();
        tmp[threadIdx.x] += partial;
        __syncthreads();
    }
    // Exactly one thread per block publishes the block total: the thread
    // holding the last in-range particle of this block.
    bool lastusefulthread;
    if (blockIdx.x == (gridDim.x - 1))
    {
        // Final block: the last useful item is the final particle.
        lastusefulthread = (index == (nparticles - 1));
    }
    else
    {
        // Interior block: the last useful item is the block's final slot.
        lastusefulthread = (threadIdx.x == (blockDim.x - 1));
    }
    if (lastusefulthread)
    {
        intermediate[blockIdx.x] = tmp[threadIdx.x];
    }
}
// Finish the normalization: divide every weight by the grand total that
// the reduction kernel (plus host combine) produced. A total that is not
// strictly positive leaves the weights untouched.
// Launch: 1-D grid with gridDim.x * blockDim.x >= nparticles.
__global__ void kernel_normalize_weights_complete(unsigned int nparticles, float *dev_weights, float summed_weights)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const bool in_range = (tid < nparticles);
    if (in_range && summed_weights > 0.0f)
    {
        dev_weights[tid] /= summed_weights;
    }
}
// Merge two weight-sorted runs A and B (largest weight first), carrying
// the x/y payload arrays along with the weights. Output goes to the
// tmpbuf_* arrays, which must have room for len_arr_a + len_arr_b
// entries. Purely sequential; intended to be called per-thread from the
// merge-sort kernel.
__device__ void kernel_sequential_merge(float *tmpbuf_weights, float *weights_a, float *weights_b,
int *tmpbuf_x, int *x_a, int *x_b,
int *tmpbuf_y, int *y_a, int *y_b,
unsigned int len_arr_a, unsigned int len_arr_b)
{
    // Standard two-finger merge, descending by weight.
    unsigned int i = 0;
    unsigned int j = 0;
    while ((i < len_arr_a) && (j < len_arr_b))
    {
        if (weights_a[i] > weights_b[j])
        {
            tmpbuf_weights[i + j] = weights_a[i];
            tmpbuf_x[i + j] = x_a[i];
            tmpbuf_y[i + j] = y_a[i];
            i++;
        }
        else
        {
            tmpbuf_weights[i + j] = weights_b[j];
            tmpbuf_x[i + j] = x_b[j];
            tmpbuf_y[i + j] = y_b[j];
            j++;
        }
    }
    // Bulk-copy the tail of whichever run is not exhausted.
    // (sizeof(float) is used for the weight copies now; the old
    // sizeof(unsigned int) happened to match but was the wrong type.)
    if (j < len_arr_b)
    {
        memcpy(&tmpbuf_weights[i + j], &weights_b[j], sizeof(float) * (len_arr_b - j));
        memcpy(&tmpbuf_x[i + j], &x_b[j], sizeof(int) * (len_arr_b - j));
        memcpy(&tmpbuf_y[i + j], &y_b[j], sizeof(int) * (len_arr_b - j));
    }
    else if (i < len_arr_a)
    {
        // BUG FIX: the y tail previously copied from y_b with B's length;
        // in this branch run A is the unfinished one, so the remainder of
        // y_a must be copied (len_arr_a - i elements).
        memcpy(&tmpbuf_weights[i + j], &weights_a[i], sizeof(float) * (len_arr_a - i));
        memcpy(&tmpbuf_x[i + j], &x_a[i], sizeof(int) * (len_arr_a - i));
        memcpy(&tmpbuf_y[i + j], &y_a[i], sizeof(int) * (len_arr_a - i));
    }
}
// The most naive parallel merge sort possible - quite possibly worse than sequential // TODO do better
// Bottom-up merge sort of particles by weight (descending), merging runs
// of doubling size into the tmpbuf_* scratch arrays.
// NOTE(review): this kernel looks unfinished/incorrect as written:
// - __syncthreads() sits inside `if (index < nparticles)`, so when
//   nparticles is not a multiple of blockDim.x some threads skip the
//   barrier (undefined behavior); a block barrier also cannot order
//   merges whose inputs span multiple blocks.
// - the merged output in tmpbuf_* is never copied back into the
//   particles_* arrays between passes, so later passes re-read unmerged
//   data.
// - `tmpbuf_start = (index - 1) * (2 * stride)` can exceed the
//   nparticles-sized scratch buffers.
// Confirm intent before relying on the resulting order.
__global__ void kernel_sort_particles(unsigned int nparticles, int *particles_x, int *particles_y, float *particles_weights,
int *tmpbuf_x, int *tmpbuf_y, float *tmpbuf_weights)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nparticles)
{
// Every other thread merges their input with their neighbor
// Binary reduction merge
for (unsigned int stride = 1; stride < nparticles; stride *= 2)
{
if (index >= stride)
{
// The first (stride / 2) elements are sorted and the second (stride / 2) elements are sorted
// The second half though may be less than stride / 2, if we are at the end of the reduction.
unsigned int len_arr_a = ceil(stride / 2.0f);
unsigned int len_arr_b = MIN(ceil(stride / 2.0f), nparticles - len_arr_a);
unsigned int start_a = index - stride;
unsigned int start_b = start_a + len_arr_a;
// Merge
float *weights_a = &particles_weights[start_a];
float *weights_b = &particles_weights[start_b];
int *x_a = &particles_x[start_a];
int *x_b = &particles_x[start_b];
int *y_a = &particles_y[start_a];
int *y_b = &particles_y[start_b];
// Since each thread is writing to the same global array, we need to make sure they are only
// writing to their appropriate subsection.
// The start of each thread's output array should be given by the following formula.
unsigned int tmpbuf_start = (index - 1) * (2 * stride);
kernel_sequential_merge(&tmpbuf_weights[tmpbuf_start], weights_a, weights_b, &tmpbuf_x[tmpbuf_start], x_a, x_b, &tmpbuf_y[tmpbuf_start], y_a, y_b, len_arr_a, len_arr_b);
}
// Since we are doing a reduction, we need to make sure each thread is done before moving on.
__syncthreads();
}
}
}
// GPU-assisted resample + move step (HIP build). Normalizes the particle
// weights on the device (block-wise scan, host-side combine of the
// per-block totals), runs the device sort attempt, copies everything
// back, then finishes the resample/move on the host (the remaining
// kernels are still TODO). Returns 0 on success; on failure the error
// code is the line number of the failing call (and we assert).
// BUG FIX: the device scratch buffers dev_sort_{x,y,weights}_tmp were
// previously released with host free(), which is undefined behavior for
// hipMalloc'ed memory -- they are now released with hipFree. Launch
// errors are also surfaced via hipGetLastError, malloc is checked, and
// the fail path releases whatever is still allocated.
int device_resample_and_move(int estimated_vx, int estimated_vy, unsigned int nparticles, int *particles_x, int *particles_y, float *particles_weights, std::mt19937 &rng, unsigned int *indices, int nthreads_per_block)
{
#if 1
    hipError_t err;
    int *dev_particles_x = nullptr;
    int *dev_particles_y = nullptr;
    float *dev_weights = nullptr;
    unsigned int *dev_indices = nullptr;
    float *dev_sum_tmp = nullptr; // The temporary results from each block during sum
    float *dev_sort_weights_tmp = nullptr;
    int *dev_sort_x_tmp = nullptr;
    int *dev_sort_y_tmp = nullptr;
    float *sum_tmp = nullptr;
    float summed_weights = 0.0f;
    int nblocks = ceil(nparticles / (float)nthreads_per_block);
#define CHECK_CUDA_ERR(err) do { if (err != hipSuccess) { err = (hipError_t)__LINE__; goto fail; }} while (0)
    /* Allocate everything we need */
    err = hipMalloc(&dev_particles_x, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    err = hipMalloc(&dev_particles_y, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    err = hipMalloc(&dev_weights, nparticles * sizeof(float));
    CHECK_CUDA_ERR(err);
    err = hipMalloc(&dev_indices, nparticles * sizeof(unsigned int));
    CHECK_CUDA_ERR(err);
    err = hipMalloc(&dev_sum_tmp, nblocks * sizeof(float));
    CHECK_CUDA_ERR(err);
    err = hipMalloc(&dev_sort_weights_tmp, nparticles * sizeof(float));
    CHECK_CUDA_ERR(err);
    err = hipMalloc(&dev_sort_x_tmp, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    err = hipMalloc(&dev_sort_y_tmp, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    /* Copy everything to the device */
    err = hipMemcpy(dev_particles_x, particles_x, nparticles * sizeof(int), hipMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    err = hipMemcpy(dev_particles_y, particles_y, nparticles * sizeof(int), hipMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    err = hipMemcpy(dev_weights, particles_weights, nparticles * sizeof(float), hipMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    err = hipMemcpy(dev_indices, indices, nparticles * sizeof(unsigned int), hipMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    /* Per-block weight totals (needs blockDim.x floats of shared memory) */
    hipLaunchKernelGGL(kernel_normalize_weights_reduction, dim3(nblocks), dim3(nthreads_per_block), sizeof(float) * nthreads_per_block, 0, nparticles, dev_weights, dev_sum_tmp);
    err = hipGetLastError(); /* catch bad launch configurations */
    CHECK_CUDA_ERR(err);
    err = hipDeviceSynchronize();
    CHECK_CUDA_ERR(err);
    /* Sequential sum of the intermediate results in dev_sum_tmp */
    sum_tmp = (float *)malloc(nblocks * sizeof(float));
    if (sum_tmp == nullptr)
    {
        /* Host allocation failed; reuse the line-number error convention */
        err = (hipError_t)__LINE__;
        goto fail;
    }
    err = hipMemcpy(sum_tmp, dev_sum_tmp, nblocks * sizeof(float), hipMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    for (int i = 0; i < nblocks; i++)
    {
        summed_weights += sum_tmp[i];
    }
    free(sum_tmp);
    sum_tmp = nullptr;
    /* Divide every weight by the grand total */
    hipLaunchKernelGGL(kernel_normalize_weights_complete, dim3(nblocks), dim3(nthreads_per_block), 0, 0, nparticles, dev_weights, summed_weights);
    err = hipGetLastError();
    CHECK_CUDA_ERR(err);
    err = hipDeviceSynchronize();
    CHECK_CUDA_ERR(err);
    /* Device-side sort attempt */
    hipLaunchKernelGGL(kernel_sort_particles, dim3(nblocks), dim3(nthreads_per_block), 0, 0, nparticles, dev_particles_x, dev_particles_y, dev_weights, dev_sort_x_tmp, dev_sort_y_tmp, dev_sort_weights_tmp);
    err = hipGetLastError();
    CHECK_CUDA_ERR(err);
    err = hipDeviceSynchronize();
    CHECK_CUDA_ERR(err);
    /* BUG FIX: device memory must be released with hipFree, not free() */
    err = hipFree(dev_sort_y_tmp);
    dev_sort_y_tmp = nullptr;
    CHECK_CUDA_ERR(err);
    err = hipFree(dev_sort_x_tmp);
    dev_sort_x_tmp = nullptr;
    CHECK_CUDA_ERR(err);
    err = hipFree(dev_sort_weights_tmp);
    dev_sort_weights_tmp = nullptr;
    CHECK_CUDA_ERR(err);
    //kernel_resample_particles
    //kernel_reset_all_weights
    //kernel_move_particles
    /* Transfer results back to host */
    err = hipMemcpy(particles_x, dev_particles_x, nparticles * sizeof(int), hipMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    err = hipMemcpy(particles_y, dev_particles_y, nparticles * sizeof(int), hipMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    err = hipMemcpy(particles_weights, dev_weights, nparticles * sizeof(float), hipMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    err = hipMemcpy(indices, dev_indices, nparticles * sizeof(unsigned int), hipMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    /* Remaining resample/move logic runs on the host until the kernels
     * above are implemented. */
    complete_resample_and_move_step(nparticles, particles_weights, rng, indices, particles_x, particles_y, estimated_vx, estimated_vy);
    /* Free up memory */
    err = hipFree(dev_particles_x);
    dev_particles_x = nullptr;
    CHECK_CUDA_ERR(err);
    err = hipFree(dev_particles_y);
    dev_particles_y = nullptr;
    CHECK_CUDA_ERR(err);
    err = hipFree(dev_weights);
    dev_weights = nullptr;
    CHECK_CUDA_ERR(err);
    err = hipFree(dev_indices);
    dev_indices = nullptr;
    CHECK_CUDA_ERR(err);
    err = hipFree(dev_sum_tmp);
    dev_sum_tmp = nullptr;
    CHECK_CUDA_ERR(err);
#undef CHECK_CUDA_ERR
fail:
    /* Best-effort cleanup: on the success path every pointer is already
     * nullptr, and hipFree(nullptr)/free(nullptr) are no-ops. */
    free(sum_tmp);
    hipFree(dev_particles_x);
    hipFree(dev_particles_y);
    hipFree(dev_weights);
    hipFree(dev_indices);
    hipFree(dev_sum_tmp);
    hipFree(dev_sort_weights_tmp);
    hipFree(dev_sort_x_tmp);
    hipFree(dev_sort_y_tmp);
    if (err != hipSuccess)
    {
        std::cout << "Error at line " << err << std::endl;
        assert(false);
    }
    return err;
#else
    // Resample from weights
    resample_particles(nparticles, particles_weights, rng, indices, particles_x, particles_y);
    // Reset all weights
    for (unsigned int i = 0; i < nparticles; i++)
    {
        particles_weights[i] = 0.0;
    }
    // Move all particles according to our movement model (plus Gaussian noise)
    // Also update weights based on how likely the movements were
    move_particles(estimated_vx, estimated_vy, nparticles, particles_x, particles_y, particles_weights, rng);
    return 0;
#endif
}
| 8c6329b1896e278eb1469f60159e86828b6b82c7.cu | #include <algorithm>
#include <assert.h>
#include <cuda_runtime.h>
#include <iostream>
#define _USE_MATH_DEFINES
#include <math.h>
#include <random>
#include <stdio.h>
#include <vector>
#include "device.h"
/** Good ol' MIN macro */
#define MIN(a, b) ((a) < (b) ? (a) : (b))
// Draw a single sample from N(mean, sigma) using the supplied engine.
// A fresh distribution object is constructed per call, so consecutive
// calls consume the engine deterministically for a given seed.
static float gaussian_noise(float mean, float sigma, std::mt19937 &rng)
{
    return std::normal_distribution<float>(mean, sigma)(rng);
}
// Density of a bivariate Gaussian with independent components (rho = 0)
// evaluated at (x, y), with means (mean_x, mean_y) and per-axis standard
// deviations (sigma_x, sigma_y). The quadratic terms are divided by 1000
// to keep the exponent from underflowing for far-away points (so this is
// a flattened, unnormalized-in-shape variant of the true pdf).
static float probability_of_value_from_bivariate_gaussian(float x, float y, float mean_x, float mean_y, float sigma_x, float sigma_y)
{
    // Covariance of two independent random variables is zero.
    const float rho = 0.0;
    float normalizer = 2.0 * M_PI * sigma_x * sigma_y * sqrt(1.0 - (rho * rho));
    // Mahalanobis-style quadratic terms.
    float qx = ((x - mean_x) * (x - mean_x)) / (sigma_x * sigma_x);
    float qxy = ((2.0 * rho * (x - mean_x) * (y - mean_y)) / (sigma_x * sigma_y));
    float qy = ((y - mean_y) * (y - mean_y)) / (sigma_y * sigma_y);
    qx /= 1000.0; // For numerical stability
    qy /= 1000.0; // Ditto
    float quad = qx - qxy + qy;
    float exponent = (-1.0 * quad) / (2.0 * (1.0 - rho * rho));
    return exp(exponent) / normalizer;
}
// One thread per particle: weights[i] = bivariate-Gaussian likelihood of
// particle i's position under a distribution centered at
// (estimate_x, estimate_y), sigma 2.5 per axis, zero correlation.
// Mirrors the host helper probability_of_value_from_bivariate_gaussian(),
// including its /1000 "numerical stability" scaling of the quadratic terms.
// Launch: 1-D grid with gridDim.x * blockDim.x >= nparticles.
__global__ void kernel_calculate_likelihood(int *particles_x, int *particles_y, float *weights, unsigned int nparticles, float estimate_x, float estimate_y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nparticles)
{
float x = (float)particles_x[index];
float y = (float)particles_y[index];
const float sigma_x = 2.5;
const float sigma_y = 2.5;
float mean_x = estimate_x;
float mean_y = estimate_y;
// Compute the probability of getting this x,y combo from a distribution centered at estimate_x, estimte_y.
const float rho = 0.0; // cov / (sig1 * sig2); Covariance of two independent random variables is zero.
// NOTE: M_PI is a double constant, so this expression is evaluated in
// double precision before being stored back to float.
float denom = 2.0f * M_PI * sigma_x * sigma_y * sqrt(1.0f - (rho * rho));
float A = ((x - mean_x) * (x - mean_x)) / (sigma_x * sigma_x);
float B = ((2.0f * rho * (x - mean_x) * (y - mean_y)) / (sigma_x * sigma_y));
float C = ((y - mean_y) * (y - mean_y)) / (sigma_y * sigma_y);
A /= 1000.0f; // For numerical stability
C /= 1000.0f; // Ditto
float z = A - B + C;
float a = (-1.0f * z) / (2.0f * (1.0f - rho * rho));
float prob = exp(a) / denom;
weights[index] = prob;
}
}
// Compute each particle's likelihood under a bivariate Gaussian centered
// at (estimate_x, estimate_y) on the GPU, writing the result into
// weights[0..nparticles). Returns 0 on success; on failure returns (and
// asserts on) the source line number of the failing CUDA call.
// FIXES vs. the original: device buffers are now released on ALL paths
// (the fail path previously leaked every allocation), and launch
// configuration errors are surfaced via cudaGetLastError().
int device_calculate_likelihood(const int *particles_x, const int *particles_y, int estimate_x, int estimate_y, float *weights, unsigned int nparticles, int nthreads_per_block)
{
    cudaError_t err;
    int *dev_particles_x = nullptr;
    int *dev_particles_y = nullptr;
    float *dev_weights = nullptr;
#define CHECK_CUDA_ERR(err) do { if (err != cudaSuccess) { err = (cudaError_t)__LINE__; goto fail; }} while (0)
    /* Malloc all the device memory we need */
    err = cudaMalloc(&dev_particles_x, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    err = cudaMalloc(&dev_particles_y, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    err = cudaMalloc(&dev_weights, nparticles * sizeof(float));
    CHECK_CUDA_ERR(err);
    /* Copy arrays onto device */
    err = cudaMemcpy(dev_particles_x, particles_x, nparticles * sizeof(int), cudaMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    err = cudaMemcpy(dev_particles_y, particles_y, nparticles * sizeof(int), cudaMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    err = cudaMemcpy(dev_weights, weights, nparticles * sizeof(float), cudaMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    /* Launch the kernel: one thread per particle */
    kernel_calculate_likelihood<<<ceil(nparticles / (float)nthreads_per_block), nthreads_per_block>>>(dev_particles_x, dev_particles_y, dev_weights, nparticles, estimate_x, estimate_y);
    err = cudaGetLastError(); /* catch bad launch configurations */
    CHECK_CUDA_ERR(err);
    err = cudaDeviceSynchronize(); /* catch asynchronous execution errors */
    CHECK_CUDA_ERR(err);
    /* Copy result back onto host */
    err = cudaMemcpy(weights, dev_weights, nparticles * sizeof(float), cudaMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
#undef CHECK_CUDA_ERR
fail:
    /* Release device memory on success and failure alike;
     * cudaFree(nullptr) is a no-op, so this is always safe. */
    cudaFree(dev_particles_x);
    cudaFree(dev_particles_y);
    cudaFree(dev_weights);
    assert(err == cudaSuccess);
    return (int)err;
}
// Advance every particle by the estimated velocity plus Gaussian noise
// (sigma = 2.5 per axis), then set its weight to the likelihood of the
// sampled noisy velocity under the motion model. Adding the float
// velocity to the int coordinates truncates toward zero.
static void move_particles(int estimated_vx, int estimated_vy, unsigned int nparticles, int *particles_x, int *particles_y, float *particles_weights, std::mt19937 &rng)
{
    static const float sigma = 2.5;
    for (unsigned int i = 0; i < nparticles; i++)
    {
        const float vx = gaussian_noise(estimated_vx, sigma, rng);
        const float vy = gaussian_noise(estimated_vy, sigma, rng);
        particles_x[i] += vx;
        particles_y[i] += vy;
        particles_weights[i] = probability_of_value_from_bivariate_gaussian(vx, vy, estimated_vx, estimated_vy, sigma, sigma);
    }
}
// Reorder the particle arrays (weights, x, y) in place into the order
// defined by the SortIndices comparator (callers expect heaviest-first).
// `indices` is sorted too and afterwards maps new position -> old
// position. Uses std::vector snapshots instead of the previous
// unchecked malloc/memcpy triple (no leak on exception, no silent null
// dereference on allocation failure).
static void sort_particles_by_weight_in_place(unsigned int *indices, unsigned int nparticles, float *particles_weights, int *particles_x, int *particles_y)
{
    // Sort the index permutation by weight.
    std::sort(indices, indices + nparticles, SortIndices(particles_weights));
    // Snapshot the arrays, then gather through the sorted index map.
    std::vector<int> xcpy(particles_x, particles_x + nparticles);
    std::vector<int> ycpy(particles_y, particles_y + nparticles);
    std::vector<float> wcpy(particles_weights, particles_weights + nparticles);
    for (unsigned int i = 0; i < nparticles; i++)
    {
        particles_weights[i] = wcpy[indices[i]];
        particles_x[i] = xcpy[indices[i]];
        particles_y[i] = ycpy[indices[i]];
    }
}
// In-place normalization: divide each weight by the array total so the
// weights form a probability distribution. Does nothing when the total
// is not strictly positive (e.g. all weights zero).
static void normalize_weights(unsigned int nparticles, float *particles_weights)
{
    float sum = 0.0;
    for (unsigned int i = 0; i < nparticles; i++)
    {
        sum += particles_weights[i];
    }
    if (!(sum > 0.0))
    {
        return;
    }
    for (unsigned int i = 0; i < nparticles; i++)
    {
        particles_weights[i] /= sum;
        // Each normalized weight must land in [0, 1].
        assert((particles_weights[i] >= 0.0) && (particles_weights[i] <= 1.0));
    }
}
// Host-side fused step: resample nparticles particles in proportion to
// their current weights, zero the weights, then move every particle by
// the estimated velocity plus Gaussian noise and set its weight to the
// likelihood of that noisy motion. Serves as the host fallback while the
// corresponding device kernels are still TODO (see the caller).
// NOTE(review): unlike resample_particles(), this does not normalize or
// sort the weights first -- if the incoming weights do not sum to ~1 the
// CMF search below can fall through to the uniform-redraw branch.
static void complete_resample_and_move_step(unsigned int nparticles, float *particles_weights, std::mt19937 &rng, unsigned int *indices, int *particles_x, int *particles_y, int estimated_vx, int estimated_vy)
{
// Create a distribution I will need
auto dist = std::uniform_real_distribution<float>(0.0, 1.0);
// NOTE(review): default-constructed, these span [0, mt19937's max value];
// presumably they should be bounded by the image height/width -- confirm.
std::uniform_int_distribution<std::mt19937::result_type> height_distribution;
std::uniform_int_distribution<std::mt19937::result_type> width_distribution;
// Create the new particles in vectors
std::vector<int> pxs;
std::vector<int> pys;
// Sort the particles by weight (in reverse - heaviest at the front of the array)
//sort_particles_by_weight_in_place(indices, nparticles, particles_weights, particles_x, particles_y);
// Align a CMF (cumulative mass function) array, where each bin is the sum of all previous weights
std::vector<float> cmf;
float acc_prob_mass = 0.0;
for (unsigned int i = 0; i < nparticles; i++)
{
acc_prob_mass += particles_weights[i];
cmf.push_back(acc_prob_mass);
}
// Do a search into the CMF to find the place where our randomly generated probability (0 to 1) fits
for (unsigned int i = 0; i < nparticles; i++)
{
float p = dist(rng);
assert((p <= 1.0) && (p >= 0.0));
int cmf_index = -1;
// Linear scan: first bin whose cumulative mass reaches p.
for (unsigned int j = 0; j < nparticles; j++)
{
// Search for where the generated probability belongs
if (p <= cmf[j])
{
cmf_index = j;
break;
}
}
if (cmf_index >= 0)
{
pxs.push_back(particles_x[cmf_index]);
pys.push_back(particles_y[cmf_index]);
}
else
{
// Probabilities are all zero. Resample from uniform.
pxs.push_back(width_distribution(rng));
pys.push_back(height_distribution(rng));
}
}
// Now overwrite the current batch of particles with the new ones
for (unsigned int i = 0; i < nparticles; i++)
{
particles_x[i] = pxs[i];
particles_y[i] = pys[i];
}
// Reset all weights
for (unsigned int i = 0; i < nparticles; i++)
{
particles_weights[i] = 0.0;
}
// Move particles
for (unsigned int i = 0; i < nparticles; i++)
{
static const float sigma = 2.5;
// Sample a noisy velocity (float); adding it to the int coordinates
// truncates toward zero per the usual float->int conversion.
float vx = gaussian_noise(estimated_vx, sigma, rng);
float vy = gaussian_noise(estimated_vy, sigma, rng);
particles_x[i] += vx;
particles_y[i] += vy;
// New weight = likelihood of the sampled velocity under the motion model.
particles_weights[i] = probability_of_value_from_bivariate_gaussian(vx, vy, estimated_vx, estimated_vy, sigma, sigma);
}
}
// Importance resampling (host): normalize the weights, sort particles
// heaviest-first, then draw nparticles replacements with probability
// proportional to weight by searching the cumulative mass function.
// Falls back to a uniform redraw when no CMF bin reaches the drawn
// probability (e.g. all weights zero). Weights are left normalized.
// IMPROVEMENT: the former O(nparticles) linear scan per draw is replaced
// with std::lower_bound -- valid because the CMF is nondecreasing (the
// normalized weights are non-negative) and lower_bound returns exactly
// the first bin with cmf[j] >= p, matching the old `p <= cmf[j]` test.
// NOTE(review): the default-constructed height/width distributions span
// [0, mt19937's max value]; presumably they should be bounded by the
// image dimensions -- confirm.
static void resample_particles(unsigned int nparticles, float *particles_weights, std::mt19937 &rng, unsigned int *indices, int *particles_x, int *particles_y)
{
    // Distributions used below.
    auto dist = std::uniform_real_distribution<float>(0.0, 1.0);
    std::uniform_int_distribution<std::mt19937::result_type> height_distribution;
    std::uniform_int_distribution<std::mt19937::result_type> width_distribution;
    // The resampled particles are staged here, then committed at the end.
    std::vector<int> pxs;
    std::vector<int> pys;
    pxs.reserve(nparticles);
    pys.reserve(nparticles);
    // Normalize the weights so that each one is between 0 and 1.
    normalize_weights(nparticles, particles_weights);
    // Sort the particles by weight (in reverse - heaviest at the front).
    sort_particles_by_weight_in_place(indices, nparticles, particles_weights, particles_x, particles_y);
    // Build the CMF: cmf[i] = sum of weights[0..i], nondecreasing.
    std::vector<float> cmf;
    cmf.reserve(nparticles);
    float acc_prob_mass = 0.0;
    for (unsigned int i = 0; i < nparticles; i++)
    {
        acc_prob_mass += particles_weights[i];
        cmf.push_back(acc_prob_mass);
    }
    for (unsigned int i = 0; i < nparticles; i++)
    {
        float p = dist(rng);
        assert((p <= 1.0) && (p >= 0.0));
        // Binary search for the first bin whose cumulative mass reaches p.
        auto it = std::lower_bound(cmf.begin(), cmf.end(), p);
        if (it != cmf.end())
        {
            size_t cmf_index = it - cmf.begin();
            pxs.push_back(particles_x[cmf_index]);
            pys.push_back(particles_y[cmf_index]);
        }
        else
        {
            // Probabilities are all zero. Resample from uniform.
            pxs.push_back(width_distribution(rng));
            pys.push_back(height_distribution(rng));
        }
    }
    // Now overwrite the current batch of particles with the new ones.
    for (unsigned int i = 0; i < nparticles; i++)
    {
        particles_x[i] = pxs[i];
        particles_y[i] = pys[i];
    }
}
// Block-level inclusive prefix sum over the particle weights; each block
// publishes its total weight (the prefix at its last in-range particle)
// into intermediate[blockIdx.x]. The host then sums the per-block totals.
// Launch: 1-D grid, dynamic shared memory of at least blockDim.x floats.
// BUG FIX: the previous scan read tmp[threadIdx.x - stride] while the
// owning thread could be concurrently updating that slot in the same
// iteration (a classic Hillis-Steele shared-memory race). We now read
// into a register, barrier, then write. The shared tail is also
// zero-filled so every slot is initialized and all barriers are reached
// by every thread in the block (no divergent __syncthreads).
__global__ void kernel_normalize_weights_reduction(unsigned int nparticles, float *dev_weights, float *intermediate)
{
    // Dynamically-sized shared memory buffer (>= blockDim.x floats).
    extern __shared__ float tmp[];
    unsigned int index = blockDim.x * blockIdx.x + threadIdx.x;
    // Load this block's weights; pad out-of-range slots with 0 so they
    // cannot perturb the prefix sums of the valid entries.
    tmp[threadIdx.x] = (index < nparticles) ? dev_weights[index] : 0.0f;
    __syncthreads();
    // Hillis-Steele inclusive scan within the block. Strides >= blockDim.x
    // can never satisfy threadIdx.x >= stride, so the loop bound is
    // blockDim.x rather than nparticles (same result, fewer iterations).
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
    {
        float partial = 0.0f;
        if (threadIdx.x >= stride)
        {
            partial = tmp[threadIdx.x - stride];
        }
        __syncthreads();
        tmp[threadIdx.x] += partial;
        __syncthreads();
    }
    // Exactly one thread per block publishes the block total: the thread
    // holding the last in-range particle of this block.
    bool lastusefulthread;
    if (blockIdx.x == (gridDim.x - 1))
    {
        // Final block: the last useful item is the final particle.
        lastusefulthread = (index == (nparticles - 1));
    }
    else
    {
        // Interior block: the last useful item is the block's final slot.
        lastusefulthread = (threadIdx.x == (blockDim.x - 1));
    }
    if (lastusefulthread)
    {
        intermediate[blockIdx.x] = tmp[threadIdx.x];
    }
}
// Second normalization pass: scale every weight by 1 / summed_weights
// (the grand total produced by the reduction kernel plus host combine).
// A total that is not strictly positive leaves the weights unchanged.
// Launch: 1-D grid with gridDim.x * blockDim.x >= nparticles.
__global__ void kernel_normalize_weights_complete(unsigned int nparticles, float *dev_weights, float summed_weights)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (!(tid < nparticles) || !(summed_weights > 0.0f))
    {
        return;
    }
    dev_weights[tid] /= summed_weights;
}
// Merge two weight-sorted runs A and B (largest weight first), carrying
// the x/y payload arrays along with the weights. Output goes to the
// tmpbuf_* arrays, which must have room for len_arr_a + len_arr_b
// entries. Purely sequential; intended to be called per-thread from the
// merge-sort kernel.
__device__ void kernel_sequential_merge(float *tmpbuf_weights, float *weights_a, float *weights_b,
int *tmpbuf_x, int *x_a, int *x_b,
int *tmpbuf_y, int *y_a, int *y_b,
unsigned int len_arr_a, unsigned int len_arr_b)
{
    // Standard two-finger merge, descending by weight.
    unsigned int i = 0;
    unsigned int j = 0;
    while ((i < len_arr_a) && (j < len_arr_b))
    {
        if (weights_a[i] > weights_b[j])
        {
            tmpbuf_weights[i + j] = weights_a[i];
            tmpbuf_x[i + j] = x_a[i];
            tmpbuf_y[i + j] = y_a[i];
            i++;
        }
        else
        {
            tmpbuf_weights[i + j] = weights_b[j];
            tmpbuf_x[i + j] = x_b[j];
            tmpbuf_y[i + j] = y_b[j];
            j++;
        }
    }
    // Bulk-copy the tail of whichever run is not exhausted.
    // (sizeof(float) is used for the weight copies now; the old
    // sizeof(unsigned int) happened to match but was the wrong type.)
    if (j < len_arr_b)
    {
        memcpy(&tmpbuf_weights[i + j], &weights_b[j], sizeof(float) * (len_arr_b - j));
        memcpy(&tmpbuf_x[i + j], &x_b[j], sizeof(int) * (len_arr_b - j));
        memcpy(&tmpbuf_y[i + j], &y_b[j], sizeof(int) * (len_arr_b - j));
    }
    else if (i < len_arr_a)
    {
        // BUG FIX: the y tail previously copied from y_b with B's length;
        // in this branch run A is the unfinished one, so the remainder of
        // y_a must be copied (len_arr_a - i elements).
        memcpy(&tmpbuf_weights[i + j], &weights_a[i], sizeof(float) * (len_arr_a - i));
        memcpy(&tmpbuf_x[i + j], &x_a[i], sizeof(int) * (len_arr_a - i));
        memcpy(&tmpbuf_y[i + j], &y_a[i], sizeof(int) * (len_arr_a - i));
    }
}
// The most naive parallel merge sort possible - quite possibly worse than sequential // TODO do better
// Bottom-up merge sort of particles by weight (descending), merging runs
// of doubling size into the tmpbuf_* scratch arrays.
// NOTE(review): this kernel looks unfinished/incorrect as written:
// - __syncthreads() sits inside `if (index < nparticles)`, so when
//   nparticles is not a multiple of blockDim.x some threads skip the
//   barrier (undefined behavior); a block barrier also cannot order
//   merges whose inputs span multiple blocks.
// - the merged output in tmpbuf_* is never copied back into the
//   particles_* arrays between passes, so later passes re-read unmerged
//   data.
// - `tmpbuf_start = (index - 1) * (2 * stride)` can exceed the
//   nparticles-sized scratch buffers.
// Confirm intent before relying on the resulting order.
__global__ void kernel_sort_particles(unsigned int nparticles, int *particles_x, int *particles_y, float *particles_weights,
int *tmpbuf_x, int *tmpbuf_y, float *tmpbuf_weights)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nparticles)
{
// Every other thread merges their input with their neighbor
// Binary reduction merge
for (unsigned int stride = 1; stride < nparticles; stride *= 2)
{
if (index >= stride)
{
// The first (stride / 2) elements are sorted and the second (stride / 2) elements are sorted
// The second half though may be less than stride / 2, if we are at the end of the reduction.
unsigned int len_arr_a = ceil(stride / 2.0f);
unsigned int len_arr_b = MIN(ceil(stride / 2.0f), nparticles - len_arr_a);
unsigned int start_a = index - stride;
unsigned int start_b = start_a + len_arr_a;
// Merge
float *weights_a = &particles_weights[start_a];
float *weights_b = &particles_weights[start_b];
int *x_a = &particles_x[start_a];
int *x_b = &particles_x[start_b];
int *y_a = &particles_y[start_a];
int *y_b = &particles_y[start_b];
// Since each thread is writing to the same global array, we need to make sure they are only
// writing to their appropriate subsection.
// The start of each thread's output array should be given by the following formula.
unsigned int tmpbuf_start = (index - 1) * (2 * stride);
kernel_sequential_merge(&tmpbuf_weights[tmpbuf_start], weights_a, weights_b, &tmpbuf_x[tmpbuf_start], x_a, x_b, &tmpbuf_y[tmpbuf_start], y_a, y_b, len_arr_a, len_arr_b);
}
// Since we are doing a reduction, we need to make sure each thread is done before moving on.
__syncthreads();
}
}
}
// GPU-assisted resample + move step. Normalizes the particle weights on
// the device (block-wise scan, host-side combine of the per-block
// totals), runs the device sort attempt, copies everything back, then
// finishes the resample/move on the host (the remaining kernels are
// still TODO). Returns 0 on success; on failure the error code is the
// line number of the failing call (and we assert).
// BUG FIX: the device scratch buffers dev_sort_{x,y,weights}_tmp were
// previously released with host free(), which is undefined behavior for
// cudaMalloc'ed memory -- they are now released with cudaFree. Launch
// errors are also surfaced via cudaGetLastError, malloc is checked, and
// the fail path releases whatever is still allocated.
int device_resample_and_move(int estimated_vx, int estimated_vy, unsigned int nparticles, int *particles_x, int *particles_y, float *particles_weights, std::mt19937 &rng, unsigned int *indices, int nthreads_per_block)
{
#if 1
    cudaError_t err;
    int *dev_particles_x = nullptr;
    int *dev_particles_y = nullptr;
    float *dev_weights = nullptr;
    unsigned int *dev_indices = nullptr;
    float *dev_sum_tmp = nullptr; // The temporary results from each block during sum
    float *dev_sort_weights_tmp = nullptr;
    int *dev_sort_x_tmp = nullptr;
    int *dev_sort_y_tmp = nullptr;
    float *sum_tmp = nullptr;
    float summed_weights = 0.0f;
    int nblocks = ceil(nparticles / (float)nthreads_per_block);
#define CHECK_CUDA_ERR(err) do { if (err != cudaSuccess) { err = (cudaError_t)__LINE__; goto fail; }} while (0)
    /* Allocate everything we need */
    err = cudaMalloc(&dev_particles_x, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    err = cudaMalloc(&dev_particles_y, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    err = cudaMalloc(&dev_weights, nparticles * sizeof(float));
    CHECK_CUDA_ERR(err);
    err = cudaMalloc(&dev_indices, nparticles * sizeof(unsigned int));
    CHECK_CUDA_ERR(err);
    err = cudaMalloc(&dev_sum_tmp, nblocks * sizeof(float));
    CHECK_CUDA_ERR(err);
    err = cudaMalloc(&dev_sort_weights_tmp, nparticles * sizeof(float));
    CHECK_CUDA_ERR(err);
    err = cudaMalloc(&dev_sort_x_tmp, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    err = cudaMalloc(&dev_sort_y_tmp, nparticles * sizeof(int));
    CHECK_CUDA_ERR(err);
    /* Copy everything to the device */
    err = cudaMemcpy(dev_particles_x, particles_x, nparticles * sizeof(int), cudaMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    err = cudaMemcpy(dev_particles_y, particles_y, nparticles * sizeof(int), cudaMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    err = cudaMemcpy(dev_weights, particles_weights, nparticles * sizeof(float), cudaMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    err = cudaMemcpy(dev_indices, indices, nparticles * sizeof(unsigned int), cudaMemcpyHostToDevice);
    CHECK_CUDA_ERR(err);
    /* Per-block weight totals (needs blockDim.x floats of shared memory) */
    kernel_normalize_weights_reduction<<<nblocks, nthreads_per_block, (sizeof(float) * nthreads_per_block)>>>(nparticles, dev_weights, dev_sum_tmp);
    err = cudaGetLastError(); /* catch bad launch configurations */
    CHECK_CUDA_ERR(err);
    err = cudaDeviceSynchronize();
    CHECK_CUDA_ERR(err);
    /* Sequential sum of the intermediate results in dev_sum_tmp */
    sum_tmp = (float *)malloc(nblocks * sizeof(float));
    if (sum_tmp == nullptr)
    {
        /* Host allocation failed; reuse the line-number error convention */
        err = (cudaError_t)__LINE__;
        goto fail;
    }
    err = cudaMemcpy(sum_tmp, dev_sum_tmp, nblocks * sizeof(float), cudaMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    for (int i = 0; i < nblocks; i++)
    {
        summed_weights += sum_tmp[i];
    }
    free(sum_tmp);
    sum_tmp = nullptr;
    /* Divide every weight by the grand total */
    kernel_normalize_weights_complete<<<nblocks, nthreads_per_block>>>(nparticles, dev_weights, summed_weights);
    err = cudaGetLastError();
    CHECK_CUDA_ERR(err);
    err = cudaDeviceSynchronize();
    CHECK_CUDA_ERR(err);
    /* Device-side sort attempt */
    kernel_sort_particles<<<nblocks, nthreads_per_block>>>(nparticles, dev_particles_x, dev_particles_y, dev_weights, dev_sort_x_tmp, dev_sort_y_tmp, dev_sort_weights_tmp);
    err = cudaGetLastError();
    CHECK_CUDA_ERR(err);
    err = cudaDeviceSynchronize();
    CHECK_CUDA_ERR(err);
    /* BUG FIX: device memory must be released with cudaFree, not free() */
    err = cudaFree(dev_sort_y_tmp);
    dev_sort_y_tmp = nullptr;
    CHECK_CUDA_ERR(err);
    err = cudaFree(dev_sort_x_tmp);
    dev_sort_x_tmp = nullptr;
    CHECK_CUDA_ERR(err);
    err = cudaFree(dev_sort_weights_tmp);
    dev_sort_weights_tmp = nullptr;
    CHECK_CUDA_ERR(err);
    //kernel_resample_particles
    //kernel_reset_all_weights
    //kernel_move_particles
    /* Transfer results back to host */
    err = cudaMemcpy(particles_x, dev_particles_x, nparticles * sizeof(int), cudaMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    err = cudaMemcpy(particles_y, dev_particles_y, nparticles * sizeof(int), cudaMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    err = cudaMemcpy(particles_weights, dev_weights, nparticles * sizeof(float), cudaMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    err = cudaMemcpy(indices, dev_indices, nparticles * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    CHECK_CUDA_ERR(err);
    /* Remaining resample/move logic runs on the host until the kernels
     * above are implemented. */
    complete_resample_and_move_step(nparticles, particles_weights, rng, indices, particles_x, particles_y, estimated_vx, estimated_vy);
    /* Free up memory */
    err = cudaFree(dev_particles_x);
    dev_particles_x = nullptr;
    CHECK_CUDA_ERR(err);
    err = cudaFree(dev_particles_y);
    dev_particles_y = nullptr;
    CHECK_CUDA_ERR(err);
    err = cudaFree(dev_weights);
    dev_weights = nullptr;
    CHECK_CUDA_ERR(err);
    err = cudaFree(dev_indices);
    dev_indices = nullptr;
    CHECK_CUDA_ERR(err);
    err = cudaFree(dev_sum_tmp);
    dev_sum_tmp = nullptr;
    CHECK_CUDA_ERR(err);
#undef CHECK_CUDA_ERR
fail:
    /* Best-effort cleanup: on the success path every pointer is already
     * nullptr, and cudaFree(nullptr)/free(nullptr) are no-ops. */
    free(sum_tmp);
    cudaFree(dev_particles_x);
    cudaFree(dev_particles_y);
    cudaFree(dev_weights);
    cudaFree(dev_indices);
    cudaFree(dev_sum_tmp);
    cudaFree(dev_sort_weights_tmp);
    cudaFree(dev_sort_x_tmp);
    cudaFree(dev_sort_y_tmp);
    if (err != cudaSuccess)
    {
        std::cout << "Error at line " << err << std::endl;
        assert(false);
    }
    return err;
#else
    // Resample from weights
    resample_particles(nparticles, particles_weights, rng, indices, particles_x, particles_y);
    // Reset all weights
    for (unsigned int i = 0; i < nparticles; i++)
    {
        particles_weights[i] = 0.0;
    }
    // Move all particles according to our movement model (plus Gaussian noise)
    // Also update weights based on how likely the movements were
    move_particles(estimated_vx, estimated_vy, nparticles, particles_x, particles_y, particles_weights, rng);
    return 0;
#endif
}
|
9e5a75ad21649a6b250e289a233e9e8f0c3774c4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <cassert>
#if __CUDA_ARCH__ < 350
// Fallback for devices older than sm_35, which lack the read-only-cache
// __ldg intrinsic: degrade to a plain global load. NOTE(review): during the
// host compilation pass __CUDA_ARCH__ is undefined (treated as 0), so this
// template is also visible to host code — harmless here, since __ldg is
// only called from device code in this file.
template <typename T>
__forceinline__ __device__ T __ldg(const T* data) {
return data[0];
}
#endif
// Logistic sigmoid: 1 / (1 + e^-x), result in (0, 1). Single precision.
__forceinline__ __device__ float Sigmoid(float x) {
const float negExp = expf(-x);
return 1.0f / (1.0f + negExp);
}
// Derivative of the logistic sigmoid: s(x) * (1 - s(x)).
__forceinline__ __device__ float SigmoidDer(float x) {
const float s = 1.0f / (1.0f + expf(-x));
return s * (1.0f - s);
}
// Computes, for every (polynom, sample) pair, the probability that the
// sample satisfies the polynom's conjunction of sigmoid-relaxed split
// conditions.
//
// Launch layout: one thread per batch element (threads with
// threadIdx.x >= batchSize idle); blocks grid-stride over polynoms via
// blockIdx.x.
//
// features:       column-major [featureCount x batchSize] device matrix
//                 (feature f of sample s at features[f * batchSize + s])
// splits:         feature id per condition, CSR-indexed by polynomOffsets
// conditions:     threshold per condition
// polynomOffsets: polynom p owns conditions [offsets[p], offsets[p + 1])
// lambda:         sharpness of the sigmoid relaxation
// probs:          output, [polynomCount x batchSize]
__global__ void PolynomProbsImpl(
const float* features,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
int polynomCount,
float lambda,
float* probs) {
if (threadIdx.x < batchSize) {
int polynomId = blockIdx.x;
features += threadIdx.x;
probs += threadIdx.x;
while (polynomId < polynomCount) {
const int offset = polynomOffsets[polynomId];
const int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
// Accumulate log-probabilities so a deep monom does not underflow.
float logProb = 0;
for (int i = 0; i < depth; ++i) {
const int f = __ldg(splits + offset + i);
const float c = __ldg(conditions + offset + i);
const float x = __ldg(features + f * batchSize);
const float val = -lambda * (x - c);
// p(split = 1) = 1 / (1 + exp(-lambda * (x - c)))
// log p = -log(1 + exp(val))
const float expVal = 1.0f + expf(val);
// If expf overflowed to inf, log(1 + exp(val)) ~= val.
// Fix: use single-precision logf — the original called the
// double-precision log() inside a float kernel.
const float isTrueLogProb = isfinite(expVal) ? logf(expVal) : val;
logProb -= isTrueLogProb;
}
const float prob = expf(logProb);
probs[polynomId * batchSize] = prob;
polynomId += gridDim.x;
}
}
}
//batch size should be equal to BlockSize
//we need to reduce polynoms for each output dim
// Reduces monom probabilities into model outputs: for each sample and each
// output dimension accumulates sum_p probs[p][sample] * values[p][dim].
//
// Launch layout: blockDim.x covers the batch (one thread per sample);
// blockIdx.x grid-strides over polynoms, blockIdx.y selects the output
// dimension. Blocks combine partial sums with atomicAdd, so `out` must be
// zero-initialized by the caller.
//
// probs:  [polynomCount x batchSize], produced by PolynomProbsImpl
// values: [polynomCount x outputDim] leaf values
// out:    [outputDim x batchSize] accumulator
__global__ void PolynomForwardImpl(
const float* probs,
int batchSize,
const float* values,
int polynomCount,
int outputDim,
float* out) {
int polynomId = blockIdx.x;
const int dimId = blockIdx.y;
const int tid = threadIdx.x;
if (tid >= batchSize) {
return;
}
float sum = 0;
probs += tid;
values += dimId;
while (polynomId < polynomCount) {
const float polynomProb = __ldg(probs + polynomId * batchSize);
// Fix: renamed from `out` — the original local shadowed the output
// pointer parameter of the same name, which was error-prone.
const float value = __ldg(values + polynomId * outputDim);
sum += polynomProb * value;
polynomId += gridDim.x;
}
atomicAdd(out + dimId * batchSize + tid, sum);
}
//
//
// Host-side launcher for the forward pass: fills tempProbs with per-monom
// probabilities, then reduces them (weighted by leaf values) into output.
// `output` must be zero-initialized by the caller — the reduction kernel
// only performs atomicAdd.
// NOTE(review): blockSize == batchSize, so batchSize must be a value the
// hardware accepts as a block size (<= 1024 on current GPUs); the assert
// below only checks < 2048 — confirm callers.
void PolynomForward(
const float lambda,
const float* features,
int fCount,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
const float* values,
int polynomCount,
int outDim,
float* tempProbs,
float* output
) {
// One thread per batch element; blocks stride over polynoms.
const int blockSize = batchSize;
const int numBlocks = min(polynomCount, 1000);
assert(batchSize < 2048);
assert(numBlocks);
PolynomProbsImpl << < numBlocks, blockSize >>> (features, batchSize, splits, conditions, polynomOffsets, polynomCount, lambda, tempProbs);
// 2D grid: x strides over polynoms, y selects the output dimension.
dim3 forwardBlocks;
forwardBlocks.z = 1;
forwardBlocks.y = outDim;
forwardBlocks.x = min(polynomCount, 512);
PolynomForwardImpl << < forwardBlocks, batchSize >> > (tempProbs, batchSize, values, polynomCount, outDim, output);
}
//
//
/*
* Here layout is not the same as in forward pass
* BlockSize = 256, MaxDepth = 8, K = 24
* should give 50% occupancy, this should be enough
*/
// Backward pass of the polynom model: accumulates d(out)/d(feature) into
// featuresDer for every sample.
//
// Launch layout: blockIdx.y selects the sample; (blockIdx.x, threadIdx.x)
// grid-stride over polynoms. Per-feature derivatives are first accumulated
// in shared memory (`memoryBlocks` replicated copies to spread atomic
// contention), then flushed to global memory with one atomicAdd per
// feature per block.
//
// NOTE(review): fids[] is `short`, so feature ids above SHRT_MAX would be
// truncated — confirm featuresCount stays small enough.
// NOTE(review): if alignedFeaturesCount > BlockSize * K then
// memoryBlocks == 0 and the `%` below is undefined — confirm callers
// guarantee featuresCount <= BlockSize * K.
// NOTE(review): polynoms deeper than MaxDepth are silently truncated by
// the fixed-size local arrays — confirm.
template <int MaxDepth, int BlockSize, int K>
__global__ void PolynomBackwardImpl(float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
// Shift all per-sample pointers to this block's sample.
const int sampleId = blockIdx.y;
features += sampleId * featuresCount;
featuresDer += sampleId * featuresCount;
outDer += sampleId * outputDim;
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
__shared__ float localFeaturesDer[BlockSize * K];
for (int i = threadIdx.x; i < BlockSize * K; i += BlockSize) {
localFeaturesDer[i] = 0;
}
__syncthreads();
const int alignedFeaturesCount = ((featuresCount + BlockSize - 1) / BlockSize) * BlockSize;
const int memoryBlocks = (BlockSize * K) / alignedFeaturesCount;
const int memoryBlockId = threadIdx.x % memoryBlocks;
int polynomId = blockIdx.x * blockDim.x + threadIdx.x;
while (polynomId < polynomCount) {
const int offset = polynomOffsets[polynomId];
const int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
if (depth != 0) {
float logProbs[MaxDepth];
short fids[MaxDepth];
float totalLogProb = 0;
// First pass: per-condition log-probabilities and their total.
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = __ldg(featureIds + i + offset);
fids[i] = f;
const float c = __ldg(conditions + i + offset);
const float x = __ldg(features + f);
const float val = -lambda * (x - c);
const float expVal = 1.0f + exp(val);
logProbs[i] = -(isfinite(expVal) ? log(expVal) : val);
totalLogProb += logProbs[i];
}
}
const float p = exp(totalLogProb);
//featureDerivative is outputDer * total value before monom * monom derivative
float derMultiplier = 0;
#pragma unroll 10
for (int dim = 0; dim < outputDim; ++dim) {
derMultiplier += __ldg(values + polynomId * outputDim + dim) * __ldg(outDer + dim);
}
// Second pass: per-feature derivative of the product of sigmoids.
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = fids[i];
const float featureProb = exp(logProbs[i]);
const float monomDer = p * (1.0 - featureProb);
const float featureDer = monomDer * derMultiplier;
//atomics in shared memory, pretty fast on pascal+ hardware
atomicAdd(localFeaturesDer + memoryBlocks * f + memoryBlockId, featureDer);
}
}
}
polynomId += gridDim.x * blockDim.x;
}
__syncthreads();
//outputDim = 1024 => memoryBlocks = 6
// Reduce the replicated shared-memory copies and flush to global memory.
for (int f = threadIdx.x; f < featuresCount; f += BlockSize) {
float der = 0;
#pragma unroll
for (int k = 0; k < memoryBlocks; ++k) {
der += localFeaturesDer[f * memoryBlocks + k];
}
atomicAdd(featuresDer + f, der);
}
}
// Host-side launcher for PolynomBackwardImpl.
// `featuresDer` must be zero-initialized by the caller (kernel only adds).
void PolynomBackward(int batchSize,
float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
// One grid row per batch element (blockIdx.y == sample id in the kernel).
numBlocks.y = batchSize;
//should be smCount * 6 / batchSize
numBlocks.x = min((polynomCount + blockSize - 1) * outputDim / blockSize, 160);
// NOTE(review): maxDepth = 12 here although the file-level comment above
// mentions MaxDepth = 8; polynoms deeper than 12 would be silently
// truncated by the kernel's fixed-size local arrays — confirm.
const int maxDepth = 12;
const int K = 16;
hipLaunchKernelGGL(( PolynomBackwardImpl<maxDepth, blockSize, K>) , dim3(numBlocks), dim3(blockSize), 0, 0, lambda, features, featuresCount, outDer, outputDim, featureIds, conditions, values, polynomOffsets, polynomCount, featuresDer);
}
//
| 9e5a75ad21649a6b250e289a233e9e8f0c3774c4.cu | #include <cuda_runtime.h>
#include <iostream>
#include <cassert>
#if __CUDA_ARCH__ < 350
// Fallback for devices older than sm_35, which lack the read-only-cache
// __ldg intrinsic: degrade to a plain global load. NOTE(review): during the
// host compilation pass __CUDA_ARCH__ is undefined (treated as 0), so this
// template is also visible to host code — harmless here, since __ldg is
// only called from device code in this file.
template <typename T>
__forceinline__ __device__ T __ldg(const T* data) {
return data[0];
}
#endif
// Logistic sigmoid: 1 / (1 + e^-x), result in (0, 1). Single precision.
__forceinline__ __device__ float Sigmoid(float x) {
const float negExp = expf(-x);
return 1.0f / (1.0f + negExp);
}
// Derivative of the logistic sigmoid: s(x) * (1 - s(x)).
__forceinline__ __device__ float SigmoidDer(float x) {
const float s = 1.0f / (1.0f + expf(-x));
return s * (1.0f - s);
}
// Computes, for every (polynom, sample) pair, the probability that the
// sample satisfies the polynom's conjunction of sigmoid-relaxed split
// conditions.
//
// Launch layout: one thread per batch element (threads with
// threadIdx.x >= batchSize idle); blocks grid-stride over polynoms via
// blockIdx.x.
//
// features:       column-major [featureCount x batchSize] device matrix
//                 (feature f of sample s at features[f * batchSize + s])
// splits:         feature id per condition, CSR-indexed by polynomOffsets
// conditions:     threshold per condition
// polynomOffsets: polynom p owns conditions [offsets[p], offsets[p + 1])
// lambda:         sharpness of the sigmoid relaxation
// probs:          output, [polynomCount x batchSize]
__global__ void PolynomProbsImpl(
const float* features,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
int polynomCount,
float lambda,
float* probs) {
if (threadIdx.x < batchSize) {
int polynomId = blockIdx.x;
features += threadIdx.x;
probs += threadIdx.x;
while (polynomId < polynomCount) {
const int offset = polynomOffsets[polynomId];
const int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
// Accumulate log-probabilities so a deep monom does not underflow.
float logProb = 0;
for (int i = 0; i < depth; ++i) {
const int f = __ldg(splits + offset + i);
const float c = __ldg(conditions + offset + i);
const float x = __ldg(features + f * batchSize);
const float val = -lambda * (x - c);
// p(split = 1) = 1 / (1 + exp(-lambda * (x - c)))
// log p = -log(1 + exp(val))
const float expVal = 1.0f + expf(val);
// If expf overflowed to inf, log(1 + exp(val)) ~= val.
// Fix: use single-precision logf — the original called the
// double-precision log() inside a float kernel.
const float isTrueLogProb = isfinite(expVal) ? logf(expVal) : val;
logProb -= isTrueLogProb;
}
const float prob = expf(logProb);
probs[polynomId * batchSize] = prob;
polynomId += gridDim.x;
}
}
}
//batch size should be equal to BlockSize
//we need to reduce polynoms for each output dim
// Reduces monom probabilities into model outputs: for each sample and each
// output dimension accumulates sum_p probs[p][sample] * values[p][dim].
//
// Launch layout: blockDim.x covers the batch (one thread per sample);
// blockIdx.x grid-strides over polynoms, blockIdx.y selects the output
// dimension. Blocks combine partial sums with atomicAdd, so `out` must be
// zero-initialized by the caller.
//
// probs:  [polynomCount x batchSize], produced by PolynomProbsImpl
// values: [polynomCount x outputDim] leaf values
// out:    [outputDim x batchSize] accumulator
__global__ void PolynomForwardImpl(
const float* probs,
int batchSize,
const float* values,
int polynomCount,
int outputDim,
float* out) {
int polynomId = blockIdx.x;
const int dimId = blockIdx.y;
const int tid = threadIdx.x;
if (tid >= batchSize) {
return;
}
float sum = 0;
probs += tid;
values += dimId;
while (polynomId < polynomCount) {
const float polynomProb = __ldg(probs + polynomId * batchSize);
// Fix: renamed from `out` — the original local shadowed the output
// pointer parameter of the same name, which was error-prone.
const float value = __ldg(values + polynomId * outputDim);
sum += polynomProb * value;
polynomId += gridDim.x;
}
atomicAdd(out + dimId * batchSize + tid, sum);
}
//
//
// Host-side launcher for the forward pass: fills tempProbs with per-monom
// probabilities, then reduces them (weighted by leaf values) into output.
// `output` must be zero-initialized by the caller — the reduction kernel
// only performs atomicAdd.
// NOTE(review): blockSize == batchSize, so batchSize must be a value the
// hardware accepts as a block size (<= 1024 on current GPUs); the assert
// below only checks < 2048 — confirm callers.
void PolynomForward(
const float lambda,
const float* features,
int fCount,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
const float* values,
int polynomCount,
int outDim,
float* tempProbs,
float* output
) {
// One thread per batch element; blocks stride over polynoms.
const int blockSize = batchSize;
const int numBlocks = min(polynomCount, 1000);
assert(batchSize < 2048);
assert(numBlocks);
PolynomProbsImpl << < numBlocks, blockSize >>> (features, batchSize, splits, conditions, polynomOffsets, polynomCount, lambda, tempProbs);
// 2D grid: x strides over polynoms, y selects the output dimension.
dim3 forwardBlocks;
forwardBlocks.z = 1;
forwardBlocks.y = outDim;
forwardBlocks.x = min(polynomCount, 512);
PolynomForwardImpl << < forwardBlocks, batchSize >> > (tempProbs, batchSize, values, polynomCount, outDim, output);
}
//
//
/*
* Here layout is not the same as in forward pass
* BlockSize = 256, MaxDepth = 8, K = 24
* should give 50% occupancy, this should be enough
*/
// Backward pass of the polynom model: accumulates d(out)/d(feature) into
// featuresDer for every sample.
//
// Launch layout: blockIdx.y selects the sample; (blockIdx.x, threadIdx.x)
// grid-stride over polynoms. Per-feature derivatives are first accumulated
// in shared memory (`memoryBlocks` replicated copies to spread atomic
// contention), then flushed to global memory with one atomicAdd per
// feature per block.
//
// NOTE(review): fids[] is `short`, so feature ids above SHRT_MAX would be
// truncated — confirm featuresCount stays small enough.
// NOTE(review): if alignedFeaturesCount > BlockSize * K then
// memoryBlocks == 0 and the `%` below is undefined — confirm callers
// guarantee featuresCount <= BlockSize * K.
// NOTE(review): polynoms deeper than MaxDepth are silently truncated by
// the fixed-size local arrays — confirm.
template <int MaxDepth, int BlockSize, int K>
__global__ void PolynomBackwardImpl(float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
// Shift all per-sample pointers to this block's sample.
const int sampleId = blockIdx.y;
features += sampleId * featuresCount;
featuresDer += sampleId * featuresCount;
outDer += sampleId * outputDim;
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
__shared__ float localFeaturesDer[BlockSize * K];
for (int i = threadIdx.x; i < BlockSize * K; i += BlockSize) {
localFeaturesDer[i] = 0;
}
__syncthreads();
const int alignedFeaturesCount = ((featuresCount + BlockSize - 1) / BlockSize) * BlockSize;
const int memoryBlocks = (BlockSize * K) / alignedFeaturesCount;
const int memoryBlockId = threadIdx.x % memoryBlocks;
int polynomId = blockIdx.x * blockDim.x + threadIdx.x;
while (polynomId < polynomCount) {
const int offset = polynomOffsets[polynomId];
const int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
if (depth != 0) {
float logProbs[MaxDepth];
short fids[MaxDepth];
float totalLogProb = 0;
// First pass: per-condition log-probabilities and their total.
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = __ldg(featureIds + i + offset);
fids[i] = f;
const float c = __ldg(conditions + i + offset);
const float x = __ldg(features + f);
const float val = -lambda * (x - c);
const float expVal = 1.0f + exp(val);
logProbs[i] = -(isfinite(expVal) ? log(expVal) : val);
totalLogProb += logProbs[i];
}
}
const float p = exp(totalLogProb);
//featureDerivative is outputDer * total value before monom * monom derivative
float derMultiplier = 0;
#pragma unroll 10
for (int dim = 0; dim < outputDim; ++dim) {
derMultiplier += __ldg(values + polynomId * outputDim + dim) * __ldg(outDer + dim);
}
// Second pass: per-feature derivative of the product of sigmoids.
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = fids[i];
const float featureProb = exp(logProbs[i]);
const float monomDer = p * (1.0 - featureProb);
const float featureDer = monomDer * derMultiplier;
//atomics in shared memory, pretty fast on pascal+ hardware
atomicAdd(localFeaturesDer + memoryBlocks * f + memoryBlockId, featureDer);
}
}
}
polynomId += gridDim.x * blockDim.x;
}
__syncthreads();
//outputDim = 1024 => memoryBlocks = 6
// Reduce the replicated shared-memory copies and flush to global memory.
for (int f = threadIdx.x; f < featuresCount; f += BlockSize) {
float der = 0;
#pragma unroll
for (int k = 0; k < memoryBlocks; ++k) {
der += localFeaturesDer[f * memoryBlocks + k];
}
atomicAdd(featuresDer + f, der);
}
}
// Host-side launcher for PolynomBackwardImpl.
// `featuresDer` must be zero-initialized by the caller (kernel only adds).
void PolynomBackward(int batchSize,
float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
// One grid row per batch element (blockIdx.y == sample id in the kernel).
numBlocks.y = batchSize;
//should be ≈ smCount * 6 / batchSize
numBlocks.x = min((polynomCount + blockSize - 1) * outputDim / blockSize, 160);
// NOTE(review): maxDepth = 12 here although the file-level comment above
// mentions MaxDepth = 8; polynoms deeper than 12 would be silently
// truncated by the kernel's fixed-size local arrays — confirm.
const int maxDepth = 12;
const int K = 16;
PolynomBackwardImpl<maxDepth, blockSize, K> <<<numBlocks, blockSize>>>(lambda, features, featuresCount, outDer, outputDim, featureIds, conditions, values, polynomOffsets, polynomCount, featuresDer);
}
//
|
a5c0fda636d86336756785874fcc85ca3c920deb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// HSV-style saturation of an RGB triple: (max - min) / max, or 0 when the
// maximum channel is 0 (pure black).
__device__ float Sat(float r, float g, float b){
const float lo = fmin(r, fmin(g, b));
const float hi = fmax(r, fmax(g, b));
if (hi == 0.0f) {
return 0.0f;
}
return (hi - lo) / hi;
}
// Per-pixel contrast + per-channel saturation grade on an interleaved RGBA
// float image (4 floats per pixel; alpha untouched).
//
// Launch layout: 2D grid covering p_Width x p_Height pixels.
// p_Cont{R,G,B}: per-channel contrast gains applied around pivot p_ContP
// p_Sat{R,G,B}:  per-channel saturation gains (1 = identity)
__global__ void FilmGradeKernelC( float* p_Input, int p_Width, int p_Height, float p_ContR, float p_ContG, float p_ContB, float p_SatR, float p_SatG, float p_SatB, float p_ContP) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < p_Width && y < p_Height) {
const int index = (y * p_Width + x) * 4;
// Contrast: scale each channel's distance from the pivot.
float contR = (p_Input[index] - p_ContP) * p_ContR + p_ContP;
float contG = (p_Input[index + 1] - p_ContP) * p_ContG + p_ContP;
float contB = (p_Input[index + 2] - p_ContP) * p_ContB + p_ContP;
// Rec.709 luma of the contrast-adjusted pixel.
float luma = contR * 0.2126f + contG * 0.7152f + contB * 0.0722f;
// Fix: this luma-weight term was computed identically three times per
// pixel; hoisted once (same expression, bit-identical result).
const float lumaWeight = 1.0f - (p_SatR * 0.2126f + p_SatG * 0.7152f + p_SatB * 0.0722f);
float outR = lumaWeight * luma + contR * p_SatR;
float outG = lumaWeight * luma + contG * p_SatG;
float outB = lumaWeight * luma + contB * p_SatB;
p_Input[index] = outR;
p_Input[index + 1] = outG;
p_Input[index + 2] = outB;
}}
// HSV-style saturation of an RGB triple: (max - min) / max, or 0 when the
// maximum channel is 0 (pure black).
__device__ float Sat(float r, float g, float b){
const float lo = fmin(r, fmin(g, b));
const float hi = fmax(r, fmax(g, b));
if (hi == 0.0f) {
return 0.0f;
}
return (hi - lo) / hi;
}
// Per-pixel contrast + per-channel saturation grade on an interleaved RGBA
// float image (4 floats per pixel; alpha untouched).
//
// Launch layout: 2D grid covering p_Width x p_Height pixels.
// p_Cont{R,G,B}: per-channel contrast gains applied around pivot p_ContP
// p_Sat{R,G,B}:  per-channel saturation gains (1 = identity)
__global__ void FilmGradeKernelC( float* p_Input, int p_Width, int p_Height, float p_ContR, float p_ContG, float p_ContB, float p_SatR, float p_SatG, float p_SatB, float p_ContP) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < p_Width && y < p_Height) {
const int index = (y * p_Width + x) * 4;
// Contrast: scale each channel's distance from the pivot.
float contR = (p_Input[index] - p_ContP) * p_ContR + p_ContP;
float contG = (p_Input[index + 1] - p_ContP) * p_ContG + p_ContP;
float contB = (p_Input[index + 2] - p_ContP) * p_ContB + p_ContP;
// Rec.709 luma of the contrast-adjusted pixel.
float luma = contR * 0.2126f + contG * 0.7152f + contB * 0.0722f;
// Fix: this luma-weight term was computed identically three times per
// pixel; hoisted once (same expression, bit-identical result).
const float lumaWeight = 1.0f - (p_SatR * 0.2126f + p_SatG * 0.7152f + p_SatB * 0.0722f);
float outR = lumaWeight * luma + contR * p_SatR;
float outG = lumaWeight * luma + contG * p_SatG;
float outB = lumaWeight * luma + contB * p_SatB;
p_Input[index] = outR;
p_Input[index + 1] = outG;
p_Input[index + 2] = outB;
}}
ca0a98dfe9f59be6bbf769c94f65435a77eb1fb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "main.h"
// g++ -D BIRCH1 -g mainOMP.cpp -std=c++11 -O3 -msse4.2 -fopenmp -o birch1 -lm
// random_device rd;
// mt19937 gen(rd());
// unsigned int max_val = gen.max();
int numThreads = 1;
__constant__ double dev_centers_global[NUM_CLUSTER*DIMENSION]; // For using constant memory
// Entry point: runs `numRuns` independent k-means experiments on the data
// set named by the compile-time DATA macro, seeded by the strategy named in
// argv[1] ("random", "kmeans++", "d2-seeding", "kmeans-par"; only
// d2-seeding is currently implemented), with argv[2] = number of runs and,
// for d2-seeding, argv[3] = multiplier for the sample size N. Per-run logs
// go under ../logs/<DATA>/; aggregate mean/sd stats go to *_result.txt.
int main(int argc, char const *argv[])
{
// Currently no argument processing logic, will always run birch1 for 2 times with N=10k
srand(time(NULL));
int numRuns,method;
int N = 0;
// for k-means parallel
int rounds = 5;
double oversampling = NUM_CLUSTER;
oversampling = 2*oversampling;
char dataFileName[100];
char mode[100];
char baseLogFile[100];
char resultFile[100];
// NOTE(review): argv[1]/argv[2] are used without checking argc, and the
// sprintf calls below pass baseLogFile as both destination and a source
// argument — overlapping sprintf is undefined behavior; consider snprintf
// into a separate buffer.
sprintf(dataFileName,"%s%s","../data/",DATA);
sprintf(mode,"%s",argv[1]);
sprintf(baseLogFile,"../logs/%s/%s_",DATA,mode);
numRuns = atoi(argv[2]);
method = -1;
numThreads = 1;
if(getenv("OMP_NUM_THREADS") != NULL)
{
numThreads = atoi(getenv("OMP_NUM_THREADS"));
printf("numThreads as gotten from env::%d\n",numThreads);
if(numThreads == 0)
{
numThreads = 1;
}
}
else
{
printf("numThreads as gotten by default::%d\n",numThreads);
}
// Map the mode string to an integer method id.
if(strcmp(mode,"random")==0)
{
method = 0;
}
if(strcmp(mode,"kmeans++")==0)
{
method = 1;
}
if(strcmp(mode,"d2-seeding")==0)
{
method = 2;
N = floor(NUM_CLUSTER * atof(argv[3]));
sprintf(baseLogFile,"%sN=%sk_",baseLogFile,argv[3]);
}
if(strcmp(mode,"kmeans-par")==0)
{
method = 3;
oversampling = NUM_CLUSTER * atof(argv[3]);
rounds = atoi(argv[4]);
sprintf(baseLogFile,"%sl=%sk_r=%d_",baseLogFile,argv[3],rounds);
}
// base log file name for individual runs
sprintf(baseLogFile,"%sthreads=%d_",baseLogFile,numThreads);
// log file for combined results. Mean and standard deviations
sprintf(resultFile,"%sresult.txt",baseLogFile);
sprintf(baseLogFile,"%srunNo=",baseLogFile);
struct timeval start,end;
// collect stats about all relevant parameters
double initTime[numRuns];
double iterTime[numRuns];
double totalTime[numRuns];
double initCost[numRuns];
double finalCost[numRuns];
double numIter[numRuns];
// read the data into a vector of "vector"
double* data;
FILE* reader;
int i = 0,j = 0;
data = (double*)malloc(NUM_POINTS*DIMENSION*sizeof(double));
reader = fopen(dataFileName,"r");
// NOTE(review): fopen result and fscanf returns are unchecked, and
// `reader` is never fclose'd.
while(i < NUM_POINTS)
{
j = 0;
while(j < DIMENSION)
{
fscanf(reader,"\t%lf",&(data[i*DIMENSION + j]));
j++;
}
i++;
}
// Copy data onto device memory
double* dev_data;
hipMalloc((void**)&dev_data,DIMENSION*NUM_POINTS*sizeof(double));
hipMemcpy(dev_data,data,DIMENSION*NUM_POINTS*sizeof(double),hipMemcpyHostToDevice);
FILE* logger;
int runNum;
for(runNum = 0; runNum < numRuns ; runNum++)
{
// NOTE(review): samplingTime_1/_2 are only written by the commented-out
// CPU path below, so the values logged for them later in this run are
// uninitialized stack memory.
double samplingTime_1[NUM_CLUSTER];
double samplingTime_2[NUM_CLUSTER];
printf("Running runNum::%d\n",runNum );
gettimeofday(&start,NULL);
int numBlocks = 8;
int numThreadsPerBlock = 1024;
int numSampleBlocks = 128;
int numSampleTperB = 32;
int numGPUThreads = numBlocks*numThreadsPerBlock;
// double* distances_debug = (double*)malloc(NUM_POINTS*sizeof(double));
double* distances; // Using page-locked memory for distances
hipHostMalloc((void**)&distances,NUM_POINTS*sizeof(double),hipHostMallocDefault);
double* centers = (double*)malloc(NUM_CLUSTER*DIMENSION*sizeof(double));
double* rnd = (double*)malloc(2*N*sizeof(double));
double* multiset = (double*)malloc(N*DIMENSION*sizeof(double));
double* partition_sums = (double*)malloc(numGPUThreads*sizeof(double));
// double* partition_sums_debug = (double*)malloc(numGPUThreads*sizeof(double));
int* sampled_indices = (int*)malloc(N*sizeof(int));
double* dev_distances;
double* dev_partition_sums;
double* dev_rnd;
int* dev_sampled_indices;
// double* dev_centers; // When not using constant memory for centers
// NOTE(review): the four device buffers below (and host sampled_indices)
// are allocated every run but never freed — memory leaks across runs.
checkCudaErrors(hipMalloc((void**)&dev_distances,NUM_POINTS*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&dev_partition_sums,numGPUThreads*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&dev_sampled_indices,N*sizeof(int)));
checkCudaErrors(hipMalloc((void**)&dev_rnd,2*N*sizeof(double)));
// checkCudaErrors(hipMalloc((void**)&dev_centers,NUM_CLUSTER*DIMENSION*sizeof(double))); // No need when using constant memory
// initialize the initial centers
if(method == 2) // d2-seeding
{
// ---------------------- GPU-Based Implementation Start ------------------------------------
hipProfilerStart();
// First choosing the first point uniformly at random, no need to sample N points and all here
int tempPointIndex = (((double) rand())/RAND_MAX)*NUM_POINTS;
memcpy(centers, data+tempPointIndex*DIMENSION, DIMENSION*sizeof(double));
checkCudaErrors(hipMemcpyToSymbol(dev_centers_global, data+tempPointIndex*DIMENSION, DIMENSION*sizeof(double),0,hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(dev_centers, data+tempPointIndex*DIMENSION, DIMENSION*sizeof(double),hipMemcpyHostToDevice));
double compDistTime = 0, makeCumulativeTime = 0, samplingTime = 0, meanHeuristicTime = 0;
// Pick the remaining NUM_CLUSTER-1 centers by D^2 sampling on the GPU.
for(i = 1; i < NUM_CLUSTER; i++)
{
struct timeval sample_start,sample_end;
gettimeofday(&sample_start,NULL);
for(j = 0; j < N; ++j)
{
rnd[2*j] = ((double) rand())/RAND_MAX;
rnd[2*j+1] = ((double) rand())/RAND_MAX;
}
hipMemcpy(dev_rnd,rnd,2*N*sizeof(double),hipMemcpyHostToDevice);// Can be overlapped with computation
// comp_dist<<<numBlocks,numThreadsPerBlock>>>(dev_data, dev_distances, dev_partition_sums, dev_centers, i, NUM_POINTS, DIMENSION, numGPUThreads);
// For blocked access pattern
// comp_dist_glbl<<<numBlocks,numThreadsPerBlock>>>(dev_data, dev_distances, dev_partition_sums, i, NUM_POINTS, DIMENSION, numGPUThreads);
// hipMemcpy(partition_sums,dev_partition_sums,numGPUThreads*sizeof(double),hipMemcpyDeviceToHost);
// for (j = 1; j < numGPUThreads; ++j) // Need to do this scan operation on GPU only, but testing things first
// {
// partition_sums[j] += partition_sums[j-1];
// }
// hipMemcpy(dev_partition_sums,partition_sums,numGPUThreads*sizeof(double),hipMemcpyHostToDevice);
// int per_thread = (NUM_POINTS + numGPUThreads-1)/numGPUThreads;
// sample_from_distribution_gpu<<<numSampleBlocks,numSampleTperB>>>(dev_partition_sums, dev_distances, dev_sampled_indices, dev_rnd, per_thread, NUM_POINTS, N);
// For strided memory access pattern
hipLaunchKernelGGL(( comp_dist_glbl_strided), dim3(numBlocks),dim3(numThreadsPerBlock), 0, 0, dev_data, dev_distances, dev_partition_sums, i, NUM_POINTS, DIMENSION, numGPUThreads);
// Prefix-sum of the squared distances is done on the host, then the
// cumulative array goes back to the device for sampling.
hipMemcpy(distances,dev_distances,NUM_POINTS*sizeof(double),hipMemcpyDeviceToHost);
for (j = 1; j < NUM_POINTS; ++j)
{
distances[j] += distances[j-1];
}
hipMemcpy(dev_distances,distances,NUM_POINTS*sizeof(double),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sample_from_distribution_gpu_strided), dim3(numSampleBlocks),dim3(numSampleTperB), 0, 0, dev_distances, dev_sampled_indices, dev_rnd, NUM_POINTS, N);
// // Division of distance array into blocks so that sampling is similar to blocked cost calculation approach
// int per_thread = (NUM_POINTS + numGPUThreads-1)/numGPUThreads;
// hipMemcpy(distances,dev_distances,NUM_POINTS*sizeof(double),hipMemcpyDeviceToHost);
// double prev_val = distances[0],prev_part_val=0;
// int p_ctr = 0;
// for (j = 1; j < NUM_POINTS; ++j)
// {
// distances[j] += prev_val;
// prev_val = distances[j];
// if ((j+1)%per_thread == 0)
// {
// partition_sums[p_ctr] = distances[j] + prev_part_val;
// prev_part_val = partition_sums[p_ctr];
// p_ctr += 1;
// prev_val = 0;
// }
// else if (j == NUM_POINTS -1)
// {
// partition_sums[p_ctr] = distances[j] + prev_part_val;
// prev_part_val = partition_sums[p_ctr];
// p_ctr += 1;
// prev_val = 0;
// }
// }
// hipMemcpy(dev_distances,distances,NUM_POINTS*sizeof(double),hipMemcpyHostToDevice);
// hipMemcpy(dev_partition_sums,partition_sums,numGPUThreads*sizeof(double),hipMemcpyHostToDevice);
// sample_from_distribution_gpu<<<numSampleBlocks,numSampleTperB>>>(dev_partition_sums, dev_distances, dev_sampled_indices, dev_rnd, per_thread, NUM_POINTS, N);
// Copy back indices of sampled points, no need to copy those points as we have the data here as well
hipMemcpy(sampled_indices,dev_sampled_indices,N*sizeof(int),hipMemcpyDeviceToHost);
for (int copy_i = 0; copy_i < N; ++copy_i)
{
int index = sampled_indices[copy_i];
for (int copy_j = 0; copy_j < DIMENSION; ++copy_j)
{
multiset[copy_i*DIMENSION + copy_j] = data[index*DIMENSION + copy_j];
}
}
gettimeofday(&sample_end,NULL);
compDistTime += get_time_diff(sample_start,sample_end);
// Code for sampling on CPU (first GPU implementation)
// // copy back to host memory for sampling purpose,
// hipMemcpy(distances,dev_distances,NUM_POINTS*sizeof(double),hipMemcpyDeviceToHost);
// hipMemcpy(partition_sums,dev_partition_sums,numGPUThreads*sizeof(double),hipMemcpyDeviceToHost);
// // Make it cumulative for sampling purpose, can be done on GPU as well
// // Already made cumulative above
// gettimeofday(&sample_start,NULL);
// for (j = 1; j < numGPUThreads; ++j)
// {
// partition_sums[j] += partition_sums[j-1];
// }
// gettimeofday(&sample_end,NULL);
// makeCumulativeTime += get_time_diff(sample_start,sample_end);
// int per_thread = (NUM_POINTS + numGPUThreads-1)/numGPUThreads;
// gettimeofday(&sample_start,NULL);
// for(j = 0 ; j < N ; j++)
// {
// rnd[2*j] = ((double) rand())/RAND_MAX;
// rnd[2*j+1] = ((double) rand())/RAND_MAX;
// int numValidPartitions = NUM_POINTS/per_thread + 1;
// // first pick a block from the local_sums distribution
// int groupNo = sample_from_distribution(partition_sums, 0, numValidPartitions, rnd[2*j]*partition_sums[numValidPartitions-1]);
// // the start and end index of this block
// int startIndex = groupNo * per_thread;
// int endIndex = (groupNo + 1) * per_thread;
// if(groupNo == numGPUThreads - 1) endIndex = NUM_POINTS;
// // now sample from the cumulative distribution of the block
// int pointIndex = sample_from_distribution(distances, startIndex, endIndex, rnd[2*j+1]*distances[endIndex-1]);
// for (int k = 0; k < DIMENSION; ++k)
// {
// multiset[j*DIMENSION + k] = data[pointIndex*DIMENSION + k];
// }
// }
// gettimeofday(&sample_end,NULL);
// samplingTime += get_time_diff(sample_start,sample_end);
// Condense the N sampled points into the next center and push it to
// the device-side constant-memory copy of the centers.
gettimeofday(&sample_start,NULL);
double* nextCenter = mean_heuristic(multiset,N);
memcpy(centers + i*DIMENSION,nextCenter,DIMENSION*sizeof(double));
checkCudaErrors(hipMemcpyToSymbol(dev_centers_global , nextCenter, DIMENSION*sizeof(double), i*DIMENSION*sizeof(double), hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(dev_centers + i*DIMENSION , nextCenter, DIMENSION*sizeof(double), hipMemcpyHostToDevice));
gettimeofday(&sample_end,NULL);
meanHeuristicTime += get_time_diff(sample_start,sample_end);
}
printf("compDistTime\t\t%2.5f\t%2.5f\n",compDistTime,compDistTime/(NUM_CLUSTER-1) );
printf("makeCumulativeTime\t%2.5f\t%2.5f\n",makeCumulativeTime,makeCumulativeTime/(NUM_CLUSTER-1) );
printf("samplingTime\t\t%2.5f\t%2.5f\n",samplingTime,samplingTime/(NUM_CLUSTER-1) );
printf("meanHeuristicTime\t%2.5f\t%2.5f\n",meanHeuristicTime,meanHeuristicTime/(NUM_CLUSTER-1) );
hipProfilerStop();
// ---------------------- GPU-Based Implementation End --------------------------------------
// ---------------------- CPU-Based Implementation Start ------------------------------------
// for(i = 0; i < NUM_CLUSTER; i++)
// {
// struct timeval sample_start,sample_end;
// gettimeofday(&sample_start,NULL);
// multiset = d2_sample(data,centers,NUM_POINTS,N,i);
// // multiset = d2_sample_2(data,centers,NUM_POINTS,N,i,distances);
// gettimeofday(&sample_end,NULL);
// printf("Time taken for d2_sample::%d-->%f\n",i,get_time_diff(sample_start,sample_end));
// samplingTime_1[i] = get_time_diff(sample_start,sample_end);
// gettimeofday(&sample_start,NULL);
// double* nextCenter = mean_heuristic(multiset,N);
// for (int j = 0; j < DIMENSION; ++j)
// {
// centers[i*DIMENSION + j] = nextCenter[j];
// }
// gettimeofday(&sample_end,NULL);
// printf("Time taken for mean_heuristic::%d-->%f\n",i,get_time_diff(sample_start,sample_end));
// samplingTime_2[i] = get_time_diff(sample_start,sample_end);
// }
// ---------------------- CPU-Based Implementation End --------------------------------------
}
else
{
printf("Only d2-seeding support for now::%d\n",method);
printf("Mode::%s\n",mode );
exit(0);
}
gettimeofday(&end,NULL);
initTime[runNum] = get_time_diff(start,end);
// now the Lloyd's iterations
// first we need to figure out the assignments
gettimeofday(&start,NULL);
double prev_cost = DBL_MAX;
int iteration = 0;
char tempFileName[100];
sprintf(tempFileName,"%s%d.txt",baseLogFile,runNum);
logger = fopen(tempFileName,"w");
// Can make first two static arrays
int* cluster_counts = (int*)malloc(NUM_CLUSTER*sizeof(int)); // number of points assigned to each cluster
double* cluster_sums = (double*)malloc(DIMENSION*NUM_CLUSTER*sizeof(double)); // sum of points assigned to each cluster
int** cluster_counts_pointers = (int**)malloc(numThreads*sizeof(int*)); // pointers to local "number of points assigned to each cluster"
double** cluster_sums_pointers = (double**)malloc(numThreads*sizeof(double*)); // pointers to local "sum of points assigned to each cluster"
while(true)
{
iteration++;
// initially, set everything to zero
for(int i = 0; i < NUM_CLUSTER; i++)
{
cluster_counts[i] = 0;
for(int j = 0; j < DIMENSION; j++)
{
cluster_sums[i*DIMENSION + j] = 0;
}
}
// cost according to the current solution
double current_cost = 0.0;
#pragma omp parallel reduction(+: current_cost)
{
int tid = omp_get_thread_num();
int local_cluster_counts[NUM_CLUSTER]; // local "number of points assigned to each cluster"
double local_cluster_sums[DIMENSION*NUM_CLUSTER]; // local "sum of points assigned to each cluster"
for(int i = 0; i < NUM_CLUSTER; i++)
{
local_cluster_counts[i] = 0;
for(int j = 0; j < DIMENSION; j++)
{
local_cluster_sums[i*DIMENSION + j] = 0;
}
}
cluster_counts_pointers[tid] = local_cluster_counts; // set the pointer
cluster_sums_pointers[tid] = local_cluster_sums; // set the pointer
int index;
double min_dist;
double current_dist;
// assign each point to their cluster center in parallel.
// update the cost of current solution and keep updating local counts and sums
#pragma omp for schedule(static)
for (int i = 0; i < NUM_POINTS; i++)
{
index = 0;
min_dist = DBL_MAX;
current_dist = 0;
for(int j = 0; j < NUM_CLUSTER; j++)
{
current_dist = distance(data + i*DIMENSION, centers + j*DIMENSION);
if(current_dist < min_dist)
{
index = j;
min_dist = current_dist;
}
}
current_cost += min_dist;
local_cluster_counts[index] += 1;
for(int j = 0; j < DIMENSION; j++)
{
local_cluster_sums[index*DIMENSION + j] = local_cluster_sums[index*DIMENSION + j] + data[i*DIMENSION + j];
}
}
// aggregate counts and sums across all threads
#pragma omp for schedule(static)
for(int i = 0; i < NUM_CLUSTER; i++)
{
for(int j = 0; j < numThreads; j++)
{
cluster_counts[i] = cluster_counts[i] + cluster_counts_pointers[j][i];
for(int k = 0; k < DIMENSION; k++)
{
cluster_sums[i*DIMENSION + k] = cluster_sums[i*DIMENSION + k] + cluster_sums_pointers[j][i*DIMENSION + k];
}
}
}
}
if(iteration == 1)
{
initCost[runNum] = current_cost;
}
// now scale all the sums by the number of points at each cluster
// NOTE(review): a cluster that lost all its points gives scaler == 0 and
// a division by zero (NaN center) — confirm this cannot happen here.
for(int i = 0; i < NUM_CLUSTER; i++)
{
int scaler = cluster_counts[i];
for(int j = 0; j < DIMENSION; j++)
{
centers[i*DIMENSION + j] = cluster_sums[i*DIMENSION + j]/scaler;
}
}
// log entry
fprintf(logger,"Iteration: %d Cost:%f\n",iteration,current_cost);
// termination criteria
if(1 - current_cost/prev_cost < 0.0001)
{
prev_cost = current_cost;
break;
}
prev_cost = current_cost;
}
gettimeofday(&end,NULL);
finalCost[runNum] = prev_cost;
numIter[runNum] = iteration;
iterTime[runNum] = get_time_diff(start,end)/numIter[runNum];
totalTime[runNum] = iterTime[runNum]*numIter[runNum] + initTime[runNum];
fprintf(logger, "Number of iterations:%f\n",numIter[runNum]);
fprintf(logger, "Initialization time:%f\n",initTime[runNum]);
fprintf(logger, "Initialization cost:%f\n",initCost[runNum]);
fprintf(logger, "Final cost:%f\n",finalCost[runNum]);
fprintf(logger, "Total time:%f\n",totalTime[runNum]);
fprintf(logger, "Per iteration time:%f\n",iterTime[runNum]);
if(method == 2) // d2-seeding
{
// NOTE(review): samplingTime_1/_2 are uninitialized on the GPU path
// (only the commented-out CPU path fills them) — these log lines
// currently print garbage.
fprintf(logger,"samplingTime_1:%f\n",mean(samplingTime_1,NUM_CLUSTER));
fprintf(logger,"samplingTime_2:%f\n",mean(samplingTime_2,NUM_CLUSTER));
}
fclose(logger);
free(cluster_counts);
free(cluster_sums);
free(cluster_counts_pointers);
free(cluster_sums_pointers);
free(centers);
hipHostFree(distances); // free this way when using page-locked memory for distances
// free(distances);
free(rnd);
free(multiset);
free(partition_sums);
}
logger = fopen(resultFile,"w");
fprintf(logger, "Initial cost: %f %f\n",mean(initCost,numRuns),sd(initCost,numRuns));
fprintf(logger, "Final cost: %f %f\n",mean(finalCost,numRuns),sd(finalCost,numRuns));
fprintf(logger, "Number of iterations: %f %f\n",mean(numIter,numRuns),sd(numIter,numRuns));
fprintf(logger, "Initialization time: %f %f\n",mean(initTime,numRuns),sd(initTime,numRuns));
fprintf(logger, "Per iteration time: %f %f\n",mean(iterTime,numRuns),sd(iterTime,numRuns));
fclose(logger);
return 0;
}
// Binary-search a cumulative (prefix-sum) weight array to sample an index in
// [startIndex, endIndex): returns the smallest i in the range with
// prob <= probabilities[i].
// Fixes vs. original:
//  * the probabilities[mid-1] read is guarded, so the search never touches
//    index startIndex-1 (out of bounds when startIndex == 0, and a foreign
//    block's prefix value when the array is only block-locally cumulative,
//    as in d2_sample);
//  * mid is initialized, so an empty range can no longer return an
//    uninitialized value.
int sample_from_distribution(double* probabilities, int startIndex, int endIndex, double prob)
{
    int start = startIndex, end = endIndex - 1;
    int mid = startIndex; // safe default for an empty range
    while(start <= end)
    {
        mid = (start+end)/2;
        if(mid > startIndex && prob < probabilities[mid-1])
        {
            end = mid-1;
        }
        else if(prob > probabilities[mid])
        {
            start = mid+1;
        }
        else
        {
            break;
        }
    }
    return mid;
}
// GPU version of sampling code.
// One thread per sample: thread i uses dev_rnd[2*i] to pick a partition from
// the cumulative dev_partition_sums distribution, then dev_rnd[2*i+1] to pick
// a point from that partition's locally-cumulative slice of dev_distances.
// Fixes vs. original:
//  * the partition end index is clamped with the dev_NUM_POINTS parameter
//    instead of the compile-time NUM_POINTS macro (the parameter was passed
//    but ignored, so the kernel only worked for the hard-coded point count);
//  * both binary searches guard their [mid-1] reads so they never read before
//    the start of their range (out of bounds for the first partition, a
//    foreign partition's prefix value otherwise);
//  * mid is initialized before each search.
__global__ void sample_from_distribution_gpu(double* dev_partition_sums, double* dev_distances, int* dev_sampled_indices, double* dev_rnd,int per_thread, int dev_NUM_POINTS, int dev_N)
{
    int numValidPartitions = dev_NUM_POINTS/per_thread + 1;
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( i < dev_N)
    {
        // ---- first pick a partition from the cumulative partition totals ----
        int start = 0;
        int end = numValidPartitions - 1;
        int mid = start;
        double prob = dev_rnd[2*i]*dev_partition_sums[end];
        while(start <= end)
        {
            mid = (start+end)/2;
            if(mid > 0 && prob < dev_partition_sums[mid-1])
            {
                end = mid-1;
            }
            else if(prob > dev_partition_sums[mid])
            {
                start = mid+1;
            }
            else
            {
                break;
            }
        }
        int groupNo = mid;
        // ---- then sample a point from the chosen partition's cumulative slice ----
        int lower = groupNo*per_thread; // first index of this partition
        start = lower;
        end = min((groupNo + 1)*per_thread, dev_NUM_POINTS) - 1;
        mid = start;
        prob = dev_rnd[2*i+1]*dev_distances[end];
        while(start <= end)
        {
            mid = (start+end)/2;
            if(mid > lower && prob < dev_distances[mid-1])
            {
                end = mid-1;
            }
            else if(prob > dev_distances[mid])
            {
                start = mid+1;
            }
            else
            {
                break;
            }
        }
        dev_sampled_indices[i] = mid;
    }
}
// Sampling for the strided memory access pattern: here dev_distances is
// cumulative over the WHOLE array (the host makes it cumulative before the
// launch), so no dev_partition_sums distribution is needed — each thread does
// a single binary search over [0, dev_NUM_POINTS).
// Fix vs. original: the dev_distances[mid-1] read is guarded with mid > 0;
// previously any prob falling into the first bucket drove mid to 0 and read
// dev_distances[-1] (out-of-bounds).
__global__ void sample_from_distribution_gpu_strided(double* dev_distances, int* dev_sampled_indices, double* dev_rnd, int dev_NUM_POINTS, int dev_N)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( i < dev_N)
    {
        int start = 0;
        int end = dev_NUM_POINTS - 1;
        int mid = (start+end)/2;
        double prob = dev_rnd[i]*dev_distances[end];
        while(start <= end)
        {
            mid = (start+end)/2;
            if(mid > 0 && prob < dev_distances[mid-1])
            {
                end = mid-1;
            }
            else if(prob > dev_distances[mid])
            {
                start = mid+1;
            }
            else
            {
                break;
            }
        }
        dev_sampled_indices[i] = mid;
    }
}
// Computes each point's sampling weight w = (min squared Euclidean distance to
// any of the first `centerIter` centers)^2 — note the extra squaring, which
// matches the CPU path (d2_sample also accumulates min_dist*min_dist on top of
// the already-squared value returned by distance()).
// Each thread owns one contiguous chunk of ceil(numPoints/numGPUThreads)
// points and writes dev_distances as a running prefix sum WITHIN its chunk;
// the chunk total goes to dev_partition_sums[tid]. The caller still needs an
// all-prefix sum across dev_partition_sums to make the distribution global.
// Unlike comp_dist, this variant recomputes the minimum over ALL centers every
// call (no reuse of the previous iteration's distances).
// Need not be called when centerIter == 0.
__global__ void comp_dist_2(double* dev_data,double* dev_distances,double* dev_partition_sums, double* dev_centers,int centerIter,int numPoints,int dev_dimension,int numGPUThreads)
{
// Simple 1-D thread blocks and 1-D grid; tid indexes the point partitions.
int tid = threadIdx.x + blockIdx.x*blockDim.x;
// int jump = blockDim.x*gridDim.x;
int per_thread = (numPoints + numGPUThreads - 1)/numGPUThreads;// ceiling of numPoints/numGPUThreads
int startIndex = tid*per_thread;
int endIndex = min((tid + 1)*per_thread,numPoints);
double min_dist = DBL_MAX, local_dist,temp,prev_val = 0;
for (int dataIndex = startIndex; dataIndex < endIndex; ++dataIndex)
{
min_dist = DBL_MAX;
for (int i = 0; i < centerIter; ++i)
{
local_dist = 0; // squared Euclidean distance to center i
for (int j = 0; j < dev_dimension; ++j)
{
temp = dev_data[dataIndex*dev_dimension + j] - dev_centers[i*dev_dimension + j];
local_dist += temp*temp;
}
min_dist = min(min_dist,local_dist);
}
dev_distances[dataIndex] = min_dist*min_dist + prev_val; // weight = (min squared dist)^2, accumulated within this chunk
// dev_distances[dataIndex] = min_dist*min_dist;
prev_val = dev_distances[dataIndex];
}
dev_partition_sums[tid] = prev_val; // chunk total (zero if the chunk is empty)
}
// Optimised variant of comp_dist_2: from the second call on (centerIter > 1)
// each point's previous weight is recovered from the chunk-cumulative
// dev_distances left by the last call (by subtracting the running prefix
// old_prev_val), and only the distance to the NEWEST center (centerIter-1) is
// computed. Layout is identical to comp_dist_2: one contiguous chunk per
// thread, dev_distances cumulative within the chunk, chunk total written to
// dev_partition_sums[tid].
// NOTE(review): the incremental path assumes each thread sees the same chunk
// it wrote in the previous launch, i.e. the same grid/block configuration is
// used every iteration — confirm at the call sites.
__global__ void comp_dist(double* dev_data,double* dev_distances,double* dev_partition_sums, double* dev_centers,int centerIter,int numPoints,int dev_dimension,int numGPUThreads)
{
// Simple 1-D thread blocks and 1-D grid; tid indexes the point partitions.
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int per_thread = (numPoints + numGPUThreads - 1)/numGPUThreads;// ceiling of numPoints/numGPUThreads
int startIndex = tid*per_thread;
int endIndex = min((tid + 1)*per_thread,numPoints);
double min_dist = DBL_MAX, local_dist,temp,prev_val = 0,old_prev_val=0;
for (int dataIndex = startIndex; dataIndex < endIndex; ++dataIndex)
{
if (centerIter == 1) // This is the first time dev_distances will get its values
{
min_dist = 0; // squared Euclidean distance to the only center (index 0)
int i = 0;
for (int j = 0; j < dev_dimension; ++j)
{
temp = dev_data[dataIndex*dev_dimension + j] - dev_centers[i*dev_dimension + j];
min_dist += temp*temp;
}
dev_distances[dataIndex] = min_dist*min_dist + prev_val; // weight = (squared dist)^2, made cumulative as it is computed
prev_val = dev_distances[dataIndex];
}
else
{
int i = centerIter - 1; // i denotes the last center that was added to the list of centers
min_dist = dev_distances[dataIndex] - old_prev_val; // recover this point's previous weight (undo the old prefix sum)
old_prev_val= dev_distances[dataIndex]; // must keep the OLD cumulative value for the next point's recovery
local_dist = 0; // squared Euclidean distance to the newest center
for (int j = 0; j < dev_dimension; ++j)
{
temp = dev_data[dataIndex*dev_dimension + j] - dev_centers[i*dev_dimension + j];
local_dist += temp*temp;
}
min_dist = min(min_dist,local_dist*local_dist); // compare weights, i.e. (squared dist)^2
dev_distances[dataIndex] = min_dist + prev_val; // No need to square min_dist here, it is already the squared-squared weight
prev_val = dev_distances[dataIndex];
}
}
dev_partition_sums[tid] = prev_val; // chunk total (zero if the chunk is empty)
}
// Same incremental cost computation as comp_dist, but the centers are read
// from __constant__ memory (dev_centers_global) instead of a global-memory
// argument — the host uploads each new center with cudaMemcpyToSymbol before
// launching. One contiguous chunk per thread, dev_distances cumulative within
// the chunk, chunk total in dev_partition_sums[tid].
// NOTE(review): like comp_dist, the incremental path assumes the same
// grid/block configuration is used on every launch so each thread revisits
// the chunk it wrote previously — confirm at the call sites.
__global__ void comp_dist_glbl(double* dev_data,double* dev_distances,double* dev_partition_sums,int centerIter,int numPoints,int dev_dimension,int numGPUThreads)
{
// Simple 1-D thread blocks and 1-D grid; tid indexes the point partitions.
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int per_thread = (numPoints + numGPUThreads - 1)/numGPUThreads;// ceiling of numPoints/numGPUThreads
int startIndex = tid*per_thread;
int endIndex = min((tid + 1)*per_thread,numPoints);
double min_dist = DBL_MAX, local_dist,temp,prev_val = 0,old_prev_val=0;
for (int dataIndex = startIndex; dataIndex < endIndex; ++dataIndex)
{
if (centerIter == 1) // This is the first time dev_distances will get its values
{
min_dist = 0; // squared Euclidean distance to the only center (index 0)
int i = 0;
for (int j = 0; j < dev_dimension; ++j)
{
temp = dev_data[dataIndex*dev_dimension + j] - dev_centers_global[i*dev_dimension + j];
min_dist += temp*temp;
}
dev_distances[dataIndex] = min_dist*min_dist + prev_val; // weight = (squared dist)^2, made cumulative as it is computed
prev_val = dev_distances[dataIndex];
}
else
{
int i = centerIter - 1; // i denotes the last center that was added to the list of centers
min_dist = dev_distances[dataIndex] - old_prev_val; // recover this point's previous weight (undo the old prefix sum)
old_prev_val= dev_distances[dataIndex]; // must keep the OLD cumulative value for the next point's recovery
local_dist = 0; // squared Euclidean distance to the newest center
for (int j = 0; j < dev_dimension; ++j)
{
temp = dev_data[dataIndex*dev_dimension + j] - dev_centers_global[i*dev_dimension + j];
local_dist += temp*temp;
}
min_dist = min(min_dist,local_dist*local_dist); // compare weights, i.e. (squared dist)^2
dev_distances[dataIndex] = min_dist + prev_val; // No need to square min_dist here, it is already the squared-squared weight
prev_val = dev_distances[dataIndex];
}
}
dev_partition_sums[tid] = prev_val; // chunk total (zero if the chunk is empty)
}
// Strided variant of the cost computation: instead of owning one contiguous
// chunk, each thread walks the point array with stride gridDim.x*blockDim.x,
// reading centers from constant memory (dev_centers_global).
// Writes each point's weight w = (min squared distance to any chosen center)^2
// WITHOUT accumulation — the host code makes dev_distances cumulative after
// the launch. dev_partition_sums and numGPUThreads are unused here but kept so
// all comp_dist_* kernels share a launch signature.
__global__ void comp_dist_glbl_strided(double* dev_data,double* dev_distances,double* dev_partition_sums,int centerIter,int numPoints,int dev_dimension,int numGPUThreads)
{
    int stride = blockDim.x*gridDim.x;
    for (int pt = threadIdx.x + blockIdx.x*blockDim.x; pt < numPoints; pt += stride)
    {
        if (centerIter == 1) // first launch: only center 0 exists
        {
            double sqDist = 0; // squared Euclidean distance to center 0
            for (int j = 0; j < dev_dimension; ++j)
            {
                double diff = dev_data[pt*dev_dimension + j] - dev_centers_global[j];
                sqDist += diff*diff;
            }
            dev_distances[pt] = sqDist*sqDist; // weight = (squared dist)^2
        }
        else
        {
            // Recompute the minimum weight over every center chosen so far
            // (this variant does not reuse the previous iteration's values).
            double bestWeight = DBL_MAX;
            for (int c = 0; c < centerIter; ++c)
            {
                double sqDist = 0; // squared Euclidean distance to center c
                for (int j = 0; j < dev_dimension; ++j)
                {
                    double diff = dev_data[pt*dev_dimension + j] - dev_centers_global[c*dev_dimension + j];
                    sqDist += diff*diff;
                }
                bestWeight = min(bestWeight, sqDist*sqDist);
            }
            dev_distances[pt] = bestWeight; // already the squared-squared weight
        }
    }
}
// Draw numSamples points from `data` (numPts x DIMENSION, row-major) with
// probability proportional to each point's D2 weight
//   w(p) = (min squared distance from p to the first `size` centers)^2,
// falling back to uniform weights (w = 1) when size == 0.
// Returns a freshly malloc'd numSamples x DIMENSION buffer owned by the caller.
// Parallel scheme: each OpenMP thread computes weights for one contiguous
// block of points, storing them in `distances` as a prefix sum that is
// cumulative only WITHIN that block ([0..numPts/numThreads], the next block,
// and so on); per-block totals go to local_sums, which the master thread turns
// into a cumulative distribution. Sampling then picks a block from local_sums
// and a point from that block's slice of distances.
double* d2_sample(double* data,double* centers,int numPts, int numSamples, int size)
{
// per-point weights, cumulative only within each thread's block
double* distances = (double*)malloc(numPts*sizeof(double));
double* local_sums = (double*)malloc(numThreads*sizeof(double)); // per-block totals; turned into a cumulative distribution by the master thread
double* result = (double*)malloc(numSamples*DIMENSION*sizeof(double));
for (int i = 0; i < numSamples; ++i)
{
for (int j = 0; j < DIMENSION; ++j)
{
result[i*DIMENSION + j] = 0;
}
}
// 2 random numbers per sample: one to pick a block, one to pick a point in it
double* rnd = (double*)malloc(2*numSamples*sizeof(double));
int i;
for(i = 0; i < 2*numSamples; i++){
rnd[i] = ((double) rand())/RAND_MAX;
}
#pragma omp parallel
{
struct timeval start,end;
gettimeofday(&start,NULL);
// partition the points into contiguous blocks, one per thread
int tid = omp_get_thread_num();
int per_thread = (numPts + numThreads - 1) / numThreads;
int lower = tid * per_thread;
int higher = (tid + 1) * per_thread;
if(tid == numThreads - 1) higher = numPts;
int block_size = higher - lower;
double min_dist, local_dist;
double* p;
double prev_val = 0;
// total weight of this thread's block
double local_sum = 0;
int center_size = size;
int i,j;
for(i = 0;i < block_size;i++)
{
if(center_size == 0){
// no centers yet: uniform weight for every point
local_sum += 1;
distances[lower+i] = 1 + prev_val;
} else{
p = data + (lower+i)*DIMENSION;
min_dist = distance(p,centers); // distance() returns the SQUARED Euclidean distance
for (j = 1; j < center_size; j++) {
local_dist = distance(p, centers + j*DIMENSION);
min_dist = min(min_dist, local_dist); // minimum squared distance over all centers
}
local_sum += min_dist * min_dist; // weight = (squared distance)^2
distances[lower+i] = min_dist * min_dist + prev_val; // make cumulative within this block
}
prev_val = distances[lower+i];
}
local_sums[tid] = local_sum;
#pragma omp barrier // all block totals are written before the master reduces them
#pragma omp master
{
for(int i=1;i<numThreads;i++){
local_sums[i] = local_sums[i] + local_sums[i-1]; // make cumulative
}
// printf("Number of threads::%d\n",omp_get_num_threads());
}
gettimeofday(&end,NULL);
float cost_time = get_time_diff(start,end); // only used by the commented-out diagnostics below
#pragma omp barrier
gettimeofday(&start,NULL);
#pragma omp for
for(int i = 0;i < numSamples;i++){
// first pick a block from the local_sums distribution
int groupNo = sample_from_distribution(local_sums, 0, numThreads, rnd[i*2]*local_sums[numThreads-1]);
// the start and end index of this block
int startIndex = groupNo * per_thread;
int endIndex = (groupNo + 1) * per_thread;
if(groupNo == numThreads - 1) endIndex = numPts;
// now sample from the cumulative distribution of the block
int pointIndex = sample_from_distribution(distances, startIndex, endIndex, rnd[2*i+1]*distances[endIndex-1]);
for (int j = 0; j < DIMENSION; ++j)
{
result[i*DIMENSION + j] = data[pointIndex*DIMENSION + j];
}
// memcpy(result + i*DIMENSION, data + pointIndex*DIMENSION, DIMENSION*sizeof(double));
}
gettimeofday(&end,NULL);
float sample_time = get_time_diff(start,end); // only used by the commented-out diagnostics below
// if (center_size >= 99)
// {
// printf("Cost computation time ::%f\n",cost_time);
// printf("Sampling time ::%f\n",sample_time);
// }
}
free(distances);
free(local_sums);
return result;
}
// Incremental version of d2_sample: the caller keeps a persistent `distances`
// buffer across calls, so for size >= 2 each point's previous weight is
// recovered from the old (block-cumulative) values and only the distance to
// the NEWEST center (index size-1) is computed. Otherwise identical to
// d2_sample: block-local prefix sums in `distances`, block totals in
// local_sums, two-level sampling, and a freshly malloc'd result buffer.
// NOTE(review): correctness of the size >= 2 path relies on numPts and
// numThreads being the same on every call so each thread revisits the block
// it wrote previously — confirm at the call sites.
double* d2_sample_2(double* data,double* centers,int numPts, int numSamples, int size, double* distances)
{
// `distances` holds per-point weights, cumulative only within each thread's
// block ([0..numPts/numThreads], the next block, and so on)
double* local_sums = (double*)malloc(numThreads*sizeof(double)); // per-block totals; turned into a cumulative distribution by the master thread
double* result = (double*)malloc(numSamples*DIMENSION*sizeof(double));
for (int i = 0; i < numSamples; ++i)
{
for (int j = 0; j < DIMENSION; ++j)
{
result[i*DIMENSION + j] = 0;
}
}
// 2 random numbers per sample: one to pick a block, one to pick a point in it
double* rnd = (double*)malloc(2*numSamples*sizeof(double));
int i;
for(i = 0; i < 2*numSamples; i++){
rnd[i] = ((double) rand())/RAND_MAX;
}
#pragma omp parallel
{
struct timeval start,end;
gettimeofday(&start,NULL);
// partition the points into contiguous blocks, one per thread
int tid = omp_get_thread_num();
int per_thread = (numPts + numThreads - 1) / numThreads;
int lower = tid * per_thread;
int higher = (tid + 1) * per_thread;
if(tid == numThreads - 1) higher = numPts;
int block_size = higher - lower;
double min_dist, local_dist;
double* p;
double prev_val = 0, old_prev_val = 0; // new and old running prefixes for this block
// total weight of this thread's block
double local_sum = 0;
int center_size = size;
int i;
for(i = 0;i < block_size;i++)
{
if(center_size == 0)
{
// no centers yet: uniform weight for every point
local_sum += 1;
distances[lower+i] = 1 + prev_val;
}
else if (center_size == 1)
{
// first real pass: weight = (squared distance to the only center)^2
p = data + (lower+i)*DIMENSION;
min_dist = distance(p,centers);
local_sum += min_dist * min_dist;
distances[lower+i] = min_dist * min_dist + prev_val; // make cumulative
}
else
{
p = data + (lower+i)*DIMENSION;
// min_dist = distance(p,centers[0]);
min_dist = distances[lower+i] - old_prev_val; // recover this point's previous weight (undo the old prefix sum)
old_prev_val = distances[lower+i]; // Important for it to have old value of distance[lower+i] so that min_dist is correct in next iteration
local_dist = distance(p,centers + (center_size-1)*DIMENSION); // Find distance wrt last added new center;
local_dist = local_dist*local_dist; // square to match the stored (squared distance)^2 weights
min_dist = min(min_dist,local_dist);
local_sum += min_dist; // min_dist is already the squared weight here, no extra squaring
distances[lower+i] = min_dist + prev_val; // make cumulative
prev_val = distances[lower+i]; // (redundant: repeated unconditionally just below)
}
prev_val = distances[lower+i];
}
local_sums[tid] = local_sum;
#pragma omp barrier // all block totals are written before the master reduces them
#pragma omp master
{
for(int i=1;i<numThreads;i++){
local_sums[i] = local_sums[i] + local_sums[i-1]; // make cumulative
}
}
gettimeofday(&end,NULL);
float cost_time = get_time_diff(start,end); // only used by the commented-out diagnostics below
#pragma omp barrier
gettimeofday(&start,NULL);
#pragma omp for
for(int i = 0;i < numSamples;i++){
// first pick a block from the local_sums distribution
int groupNo = sample_from_distribution(local_sums, 0, numThreads, rnd[i*2]*local_sums[numThreads-1]);
// the start and end index of this block
int startIndex = groupNo * per_thread;
int endIndex = (groupNo + 1) * per_thread;
if(groupNo == numThreads - 1) endIndex = numPts;
// now sample from the cumulative distribution of the block
int pointIndex = sample_from_distribution(distances, startIndex, endIndex, rnd[2*i+1]*distances[endIndex-1]);
for (int j = 0; j < DIMENSION; ++j)
{
result[i*DIMENSION + j] = data[pointIndex*DIMENSION + j];
}
// memcpy(result + i*DIMENSION, data + pointIndex*DIMENSION, DIMENSION*sizeof(double));
}
gettimeofday(&end,NULL);
float sample_time = get_time_diff(start,end); // only used by the commented-out diagnostics below
// if (center_size >= 99)
// {
// printf("Cost computation time ::%f\n",cost_time);
// printf("Sampling time ::%f\n",sample_time);
// }
}
free(local_sums);
return result;
}
// Pick the next center from a sampled multiset via the "mean heuristic":
//  1) d2-sample NUM_CLUSTER tentative centers from the multiset (kmeans++-style),
//  2) assign every multiset point to its nearest tentative center, in parallel
//     with per-thread partial sums,
//  3) return the centroid of the most populated cluster.
// Fixes vs. original:
//  * the returned pointer used to alias cluster_means, which was freed right
//    before returning (use-after-free for every caller); the selected mean is
//    now copied into a freshly malloc'd DIMENSION-sized buffer owned by the
//    caller,
//  * the buffers returned by d2_sample and the level_2_sample buffer are now
//    freed (they used to leak on every call).
double* mean_heuristic(double* multiset,int multisetSize)
{
    int i,j;
    // Step 1: d2-sample NUM_CLUSTER tentative centers, one at a time.
    double* level_2_sample = (double*)malloc(NUM_CLUSTER*DIMENSION*sizeof(double));
    for(i = 0; i < NUM_CLUSTER; i++)
    {
        double* point = d2_sample(multiset,level_2_sample,multisetSize,1,i);
        for (j = 0; j < DIMENSION; ++j)
        {
            level_2_sample[i*DIMENSION + j] = point[j];
        }
        free(point); // d2_sample returns a malloc'd buffer the caller owns
    }
    // counts[c]        : number of multiset points assigned to tentative center c
    // cluster_means[c] : running sum of those points (scaled to a mean later)
    int* counts = (int*)malloc(NUM_CLUSTER*sizeof(int));
    double* cluster_means = (double*)malloc(NUM_CLUSTER*DIMENSION*sizeof(double));
    for (i = 0; i < NUM_CLUSTER; i++)
    {
        counts[i] = 0;
        for(j = 0; j < DIMENSION; j++)
        {
            cluster_means[i*DIMENSION + j] = 0;
        }
    }
    // Step 2: parallel assignment. Each thread accumulates into private
    // buffers, published via these shared pointer arrays for the reduction.
    int** local_tmp_counts = (int**)malloc(numThreads*sizeof(int*));
    double** local_tmp_cluster_means = (double**)malloc(numThreads*sizeof(double*));
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int* local_counts = (int*)malloc(NUM_CLUSTER*sizeof(int));
        double* local_cluster_means = (double*)malloc(NUM_CLUSTER*DIMENSION*sizeof(double));
        for (int i = 0; i < NUM_CLUSTER; i++)
        {
            local_counts[i] = 0;
            for(int j = 0; j < DIMENSION; j++)
            {
                local_cluster_means[i*DIMENSION + j] = 0;
            }
        }
        local_tmp_counts[tid] = local_counts; // publish private buffers
        local_tmp_cluster_means[tid] = local_cluster_means;
        double min_dist, tmp_dist;
        int index;
        // Assign each point to its nearest tentative center. The implicit
        // barrier at the end of this loop guarantees every thread's private
        // buffers are complete before the reduction below reads them.
        #pragma omp for schedule(static)
        for (int i = 0; i < multisetSize; i++)
        {
            min_dist = distance(level_2_sample,multiset + i*DIMENSION);
            index = 0;
            for (int j = 1; j < NUM_CLUSTER; j++)
            {
                tmp_dist = distance(level_2_sample + j*DIMENSION, multiset+ i*DIMENSION);
                if (tmp_dist < min_dist)
                {
                    min_dist = tmp_dist;
                    index = j;
                }
            }
            for(int j = 0; j < DIMENSION; j++)
            {
                local_cluster_means[index*DIMENSION + j] += multiset[i*DIMENSION + j];
            }
            local_counts[index]++;
        }
        // Reduce all threads' partial sums into the shared arrays.
        #pragma omp for schedule(static)
        for (int i = 0; i < NUM_CLUSTER; i++)
        {
            for (int p = 0; p < numThreads ; p++)
            {
                for(int j = 0; j < DIMENSION; j++)
                {
                    cluster_means[i*DIMENSION + j] += local_tmp_cluster_means[p][i*DIMENSION + j];
                }
                counts[i] += local_tmp_counts[p][i];
            }
        }
        // Implicit barrier above means no thread still reads these buffers.
        free(local_counts);
        free(local_cluster_means);
    }
    // Step 3: find the cluster with the most assigned points.
    int max = counts[0];
    int index = 0;
    for (int i = 1; i < NUM_CLUSTER; i++)
    {
        if (counts[i] > max)
        {
            max = counts[i];
            index = i;
        }
    }
    // Scale the winning sum into a mean and copy it into its own buffer so it
    // survives the frees below (the original returned a pointer into freed
    // memory).
    double* result = (double*)malloc(DIMENSION*sizeof(double));
    for(int i = 0; i < DIMENSION; i++){
        result[i] = cluster_means[index*DIMENSION + i] / counts[index];
    }
    free(counts);
    free(cluster_means);
    free(local_tmp_counts);
    free(local_tmp_cluster_means);
    free(level_2_sample);
    return result;
}
// Squared Euclidean distance between two DIMENSION-dimensional points
// (no square root is taken).
double distance(double* p1, double* p2)
{
    double total = 0;
    for (int k = 0; k < DIMENSION; ++k)
    {
        double diff = p1[k] - p2[k];
        total += diff * diff;
    }
    return total;
}
// Dump the NUM_CLUSTER x DIMENSION center matrix to "dataExchange.txt",
// one center per line, values space-separated.
// Fixes vs. original: the fopen result is now checked (it can fail), and the
// stream is closed before returning — previously the FILE* leaked and
// buffered output could be lost.
void write_centers_to_file(double* centers)
{
    FILE* writer = fopen("dataExchange.txt","w");
    if (writer == NULL)
    {
        fprintf(stderr, "write_centers_to_file: could not open dataExchange.txt\n");
        return;
    }
    for (int i = 0; i < NUM_CLUSTER; ++i)
    {
        for (int j = 0; j < DIMENSION; ++j)
        {
            fprintf(writer, "%f ",centers[i*DIMENSION + j] );
        }
        fprintf(writer, "\n");
    }
    fclose(writer);
}
// Arithmetic mean of a[0..n-1]. Accumulates in double; returns float to match
// the original interface. Returns 0 for n <= 0 instead of dividing by zero.
static inline float mean(double* a, int n)
{
    if (n <= 0) return 0;
    double sum = 0;
    for(int i = 0; i < n; i++){
        sum += a[i];
    }
    return sum/n;
}
// Population standard deviation of a[0..n-1] (two-pass: mean, then mean of
// squared deviations, then square root). Returns float to match the original
// interface. Returns 0 for n <= 0 instead of dividing by zero.
static inline float sd(double* a, int n)
{
    if (n <= 0) return 0;
    double sum = 0;
    for(int i = 0; i < n; i++){
        sum += a[i];
    }
    double mean = sum/n;
    sum = 0;
    for(int i = 0; i < n; i++){
        sum += (a[i] - mean) * (a[i] - mean);
    }
    return sqrt(sum/n);
}
// Elapsed time from t1 to t2 in seconds, as a double (whole seconds plus the
// microsecond difference scaled by 1e-6).
static inline double get_time_diff(struct timeval t1, struct timeval t2){
    double wholeSeconds = t2.tv_sec - t1.tv_sec;
    return wholeSeconds + 1e-6 * (t2.tv_usec - t1.tv_usec);
}
| ca0a98dfe9f59be6bbf769c94f65435a77eb1fb5.cu | #include "main.h"
// g++ -D BIRCH1 -g mainOMP.cpp -std=c++11 -O3 -msse4.2 -fopenmp -o birch1 -lm
// random_device rd;
// mt19937 gen(rd());
// unsigned int max_val = gen.max();
int numThreads = 1;
__constant__ double dev_centers_global[NUM_CLUSTER*DIMENSION]; // For using constant memory
int main(int argc, char const *argv[])
{
// Currently no argument processing logic, will always run birch1 for 2 times with N=10k
srand(time(NULL));
int numRuns,method;
int N = 0;
// for k-means parallel
int rounds = 5;
double oversampling = NUM_CLUSTER;
oversampling = 2*oversampling;
char dataFileName[100];
char mode[100];
char baseLogFile[100];
char resultFile[100];
sprintf(dataFileName,"%s%s","../data/",DATA);
sprintf(mode,"%s",argv[1]);
sprintf(baseLogFile,"../logs/%s/%s_",DATA,mode);
numRuns = atoi(argv[2]);
method = -1;
numThreads = 1;
if(getenv("OMP_NUM_THREADS") != NULL)
{
numThreads = atoi(getenv("OMP_NUM_THREADS"));
printf("numThreads as gotten from env::%d\n",numThreads);
if(numThreads == 0)
{
numThreads = 1;
}
}
else
{
printf("numThreads as gotten by default::%d\n",numThreads);
}
if(strcmp(mode,"random")==0)
{
method = 0;
}
if(strcmp(mode,"kmeans++")==0)
{
method = 1;
}
if(strcmp(mode,"d2-seeding")==0)
{
method = 2;
N = floor(NUM_CLUSTER * atof(argv[3]));
sprintf(baseLogFile,"%sN=%sk_",baseLogFile,argv[3]);
}
if(strcmp(mode,"kmeans-par")==0)
{
method = 3;
oversampling = NUM_CLUSTER * atof(argv[3]);
rounds = atoi(argv[4]);
sprintf(baseLogFile,"%sl=%sk_r=%d_",baseLogFile,argv[3],rounds);
}
// base log file name for individual runs
sprintf(baseLogFile,"%sthreads=%d_",baseLogFile,numThreads);
// log file for combined results. Mean and standard deviations
sprintf(resultFile,"%sresult.txt",baseLogFile);
sprintf(baseLogFile,"%srunNo=",baseLogFile);
struct timeval start,end;
// collect stats about all relevant parameters
double initTime[numRuns];
double iterTime[numRuns];
double totalTime[numRuns];
double initCost[numRuns];
double finalCost[numRuns];
double numIter[numRuns];
// read the data into a vector of "vector"
double* data;
FILE* reader;
int i = 0,j = 0;
data = (double*)malloc(NUM_POINTS*DIMENSION*sizeof(double));
reader = fopen(dataFileName,"r");
while(i < NUM_POINTS)
{
j = 0;
while(j < DIMENSION)
{
fscanf(reader,"\t%lf",&(data[i*DIMENSION + j]));
j++;
}
i++;
}
// Copy data onto device memory
double* dev_data;
cudaMalloc((void**)&dev_data,DIMENSION*NUM_POINTS*sizeof(double));
cudaMemcpy(dev_data,data,DIMENSION*NUM_POINTS*sizeof(double),cudaMemcpyHostToDevice);
FILE* logger;
int runNum;
for(runNum = 0; runNum < numRuns ; runNum++)
{
double samplingTime_1[NUM_CLUSTER];
double samplingTime_2[NUM_CLUSTER];
printf("Running runNum::%d\n",runNum );
gettimeofday(&start,NULL);
int numBlocks = 8;
int numThreadsPerBlock = 1024;
int numSampleBlocks = 128;
int numSampleTperB = 32;
int numGPUThreads = numBlocks*numThreadsPerBlock;
// double* distances_debug = (double*)malloc(NUM_POINTS*sizeof(double));
double* distances; // Using page-locked memory for distances
cudaHostAlloc((void**)&distances,NUM_POINTS*sizeof(double),cudaHostAllocDefault);
double* centers = (double*)malloc(NUM_CLUSTER*DIMENSION*sizeof(double));
double* rnd = (double*)malloc(2*N*sizeof(double));
double* multiset = (double*)malloc(N*DIMENSION*sizeof(double));
double* partition_sums = (double*)malloc(numGPUThreads*sizeof(double));
// double* partition_sums_debug = (double*)malloc(numGPUThreads*sizeof(double));
int* sampled_indices = (int*)malloc(N*sizeof(int));
double* dev_distances;
double* dev_partition_sums;
double* dev_rnd;
int* dev_sampled_indices;
// double* dev_centers; // When not using constant memory for centers
checkCudaErrors(cudaMalloc((void**)&dev_distances,NUM_POINTS*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&dev_partition_sums,numGPUThreads*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&dev_sampled_indices,N*sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&dev_rnd,2*N*sizeof(double)));
// checkCudaErrors(cudaMalloc((void**)&dev_centers,NUM_CLUSTER*DIMENSION*sizeof(double))); // No need when using constant memory
// initialize the initial centers
if(method == 2) // d2-seeding
{
// ---------------------- GPU-Based Implementation Start ------------------------------------
cudaProfilerStart();
// First choosing the first point uniformly at random, no need to sample N points and all here
int tempPointIndex = (((double) rand())/RAND_MAX)*NUM_POINTS;
memcpy(centers, data+tempPointIndex*DIMENSION, DIMENSION*sizeof(double));
checkCudaErrors(cudaMemcpyToSymbol(dev_centers_global, data+tempPointIndex*DIMENSION, DIMENSION*sizeof(double),0,cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(dev_centers, data+tempPointIndex*DIMENSION, DIMENSION*sizeof(double),cudaMemcpyHostToDevice));
double compDistTime = 0, makeCumulativeTime = 0, samplingTime = 0, meanHeuristicTime = 0;
for(i = 1; i < NUM_CLUSTER; i++)
{
struct timeval sample_start,sample_end;
gettimeofday(&sample_start,NULL);
for(j = 0; j < N; ++j)
{
rnd[2*j] = ((double) rand())/RAND_MAX;
rnd[2*j+1] = ((double) rand())/RAND_MAX;
}
cudaMemcpy(dev_rnd,rnd,2*N*sizeof(double),cudaMemcpyHostToDevice);// Can be overlapped with computation
// comp_dist<<<numBlocks,numThreadsPerBlock>>>(dev_data, dev_distances, dev_partition_sums, dev_centers, i, NUM_POINTS, DIMENSION, numGPUThreads);
// For blocked access pattern
// comp_dist_glbl<<<numBlocks,numThreadsPerBlock>>>(dev_data, dev_distances, dev_partition_sums, i, NUM_POINTS, DIMENSION, numGPUThreads);
// cudaMemcpy(partition_sums,dev_partition_sums,numGPUThreads*sizeof(double),cudaMemcpyDeviceToHost);
// for (j = 1; j < numGPUThreads; ++j) // Need to do this scan operation on GPU only, but testing things first
// {
// partition_sums[j] += partition_sums[j-1];
// }
// cudaMemcpy(dev_partition_sums,partition_sums,numGPUThreads*sizeof(double),cudaMemcpyHostToDevice);
// int per_thread = (NUM_POINTS + numGPUThreads-1)/numGPUThreads;
// sample_from_distribution_gpu<<<numSampleBlocks,numSampleTperB>>>(dev_partition_sums, dev_distances, dev_sampled_indices, dev_rnd, per_thread, NUM_POINTS, N);
// For strided memory access pattern
comp_dist_glbl_strided<<<numBlocks,numThreadsPerBlock>>>(dev_data, dev_distances, dev_partition_sums, i, NUM_POINTS, DIMENSION, numGPUThreads);
cudaMemcpy(distances,dev_distances,NUM_POINTS*sizeof(double),cudaMemcpyDeviceToHost);
for (j = 1; j < NUM_POINTS; ++j)
{
distances[j] += distances[j-1];
}
cudaMemcpy(dev_distances,distances,NUM_POINTS*sizeof(double),cudaMemcpyHostToDevice);
sample_from_distribution_gpu_strided<<<numSampleBlocks,numSampleTperB>>>(dev_distances, dev_sampled_indices, dev_rnd, NUM_POINTS, N);
// // Division of distance array into blocks so that sampling is similar to blocked cost calculation approach
// int per_thread = (NUM_POINTS + numGPUThreads-1)/numGPUThreads;
// cudaMemcpy(distances,dev_distances,NUM_POINTS*sizeof(double),cudaMemcpyDeviceToHost);
// double prev_val = distances[0],prev_part_val=0;
// int p_ctr = 0;
// for (j = 1; j < NUM_POINTS; ++j)
// {
// distances[j] += prev_val;
// prev_val = distances[j];
// if ((j+1)%per_thread == 0)
// {
// partition_sums[p_ctr] = distances[j] + prev_part_val;
// prev_part_val = partition_sums[p_ctr];
// p_ctr += 1;
// prev_val = 0;
// }
// else if (j == NUM_POINTS -1)
// {
// partition_sums[p_ctr] = distances[j] + prev_part_val;
// prev_part_val = partition_sums[p_ctr];
// p_ctr += 1;
// prev_val = 0;
// }
// }
// cudaMemcpy(dev_distances,distances,NUM_POINTS*sizeof(double),cudaMemcpyHostToDevice);
// cudaMemcpy(dev_partition_sums,partition_sums,numGPUThreads*sizeof(double),cudaMemcpyHostToDevice);
// sample_from_distribution_gpu<<<numSampleBlocks,numSampleTperB>>>(dev_partition_sums, dev_distances, dev_sampled_indices, dev_rnd, per_thread, NUM_POINTS, N);
// Copy back indices of sampled points, no need to copy those points as we have the data here as well
cudaMemcpy(sampled_indices,dev_sampled_indices,N*sizeof(int),cudaMemcpyDeviceToHost);
for (int copy_i = 0; copy_i < N; ++copy_i)
{
int index = sampled_indices[copy_i];
for (int copy_j = 0; copy_j < DIMENSION; ++copy_j)
{
multiset[copy_i*DIMENSION + copy_j] = data[index*DIMENSION + copy_j];
}
}
gettimeofday(&sample_end,NULL);
compDistTime += get_time_diff(sample_start,sample_end);
// Code for sampling on CPU (first GPU implementation)
// // copy back to host memory for sampling purpose,
// cudaMemcpy(distances,dev_distances,NUM_POINTS*sizeof(double),cudaMemcpyDeviceToHost);
// cudaMemcpy(partition_sums,dev_partition_sums,numGPUThreads*sizeof(double),cudaMemcpyDeviceToHost);
// // Make it cumulative for sampling purpose, can be done on GPU as well
// // Already made cumulative above
// gettimeofday(&sample_start,NULL);
// for (j = 1; j < numGPUThreads; ++j)
// {
// partition_sums[j] += partition_sums[j-1];
// }
// gettimeofday(&sample_end,NULL);
// makeCumulativeTime += get_time_diff(sample_start,sample_end);
// int per_thread = (NUM_POINTS + numGPUThreads-1)/numGPUThreads;
// gettimeofday(&sample_start,NULL);
// for(j = 0 ; j < N ; j++)
// {
// rnd[2*j] = ((double) rand())/RAND_MAX;
// rnd[2*j+1] = ((double) rand())/RAND_MAX;
// int numValidPartitions = NUM_POINTS/per_thread + 1;
// // first pick a block from the local_sums distribution
// int groupNo = sample_from_distribution(partition_sums, 0, numValidPartitions, rnd[2*j]*partition_sums[numValidPartitions-1]);
// // the start and end index of this block
// int startIndex = groupNo * per_thread;
// int endIndex = (groupNo + 1) * per_thread;
// if(groupNo == numGPUThreads - 1) endIndex = NUM_POINTS;
// // now sample from the cumulative distribution of the block
// int pointIndex = sample_from_distribution(distances, startIndex, endIndex, rnd[2*j+1]*distances[endIndex-1]);
// for (int k = 0; k < DIMENSION; ++k)
// {
// multiset[j*DIMENSION + k] = data[pointIndex*DIMENSION + k];
// }
// }
// gettimeofday(&sample_end,NULL);
// samplingTime += get_time_diff(sample_start,sample_end);
gettimeofday(&sample_start,NULL);
double* nextCenter = mean_heuristic(multiset,N);
memcpy(centers + i*DIMENSION,nextCenter,DIMENSION*sizeof(double));
checkCudaErrors(cudaMemcpyToSymbol(dev_centers_global , nextCenter, DIMENSION*sizeof(double), i*DIMENSION*sizeof(double), cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(dev_centers + i*DIMENSION , nextCenter, DIMENSION*sizeof(double), cudaMemcpyHostToDevice));
gettimeofday(&sample_end,NULL);
meanHeuristicTime += get_time_diff(sample_start,sample_end);
}
printf("compDistTime\t\t%2.5f\t%2.5f\n",compDistTime,compDistTime/(NUM_CLUSTER-1) );
printf("makeCumulativeTime\t%2.5f\t%2.5f\n",makeCumulativeTime,makeCumulativeTime/(NUM_CLUSTER-1) );
printf("samplingTime\t\t%2.5f\t%2.5f\n",samplingTime,samplingTime/(NUM_CLUSTER-1) );
printf("meanHeuristicTime\t%2.5f\t%2.5f\n",meanHeuristicTime,meanHeuristicTime/(NUM_CLUSTER-1) );
cudaProfilerStop();
// ---------------------- GPU-Based Implementation End --------------------------------------
// ---------------------- CPU-Based Implementation Start ------------------------------------
// for(i = 0; i < NUM_CLUSTER; i++)
// {
// struct timeval sample_start,sample_end;
// gettimeofday(&sample_start,NULL);
// multiset = d2_sample(data,centers,NUM_POINTS,N,i);
// // multiset = d2_sample_2(data,centers,NUM_POINTS,N,i,distances);
// gettimeofday(&sample_end,NULL);
// printf("Time taken for d2_sample::%d-->%f\n",i,get_time_diff(sample_start,sample_end));
// samplingTime_1[i] = get_time_diff(sample_start,sample_end);
// gettimeofday(&sample_start,NULL);
// double* nextCenter = mean_heuristic(multiset,N);
// for (int j = 0; j < DIMENSION; ++j)
// {
// centers[i*DIMENSION + j] = nextCenter[j];
// }
// gettimeofday(&sample_end,NULL);
// printf("Time taken for mean_heuristic::%d-->%f\n",i,get_time_diff(sample_start,sample_end));
// samplingTime_2[i] = get_time_diff(sample_start,sample_end);
// }
// ---------------------- CPU-Based Implementation End --------------------------------------
}
else
{
printf("Only d2-seeding support for now::%d\n",method);
printf("Mode::%s\n",mode );
exit(0);
}
gettimeofday(&end,NULL);
initTime[runNum] = get_time_diff(start,end);
// now the Lloyd's iterations
// first we need to figure out the assignments
gettimeofday(&start,NULL);
double prev_cost = DBL_MAX;
int iteration = 0;
char tempFileName[100];
sprintf(tempFileName,"%s%d.txt",baseLogFile,runNum);
logger = fopen(tempFileName,"w");
// Can make first two static arrays
int* cluster_counts = (int*)malloc(NUM_CLUSTER*sizeof(int)); // number of points assigned to each cluster
double* cluster_sums = (double*)malloc(DIMENSION*NUM_CLUSTER*sizeof(double)); // sum of points assigned to each cluster
int** cluster_counts_pointers = (int**)malloc(numThreads*sizeof(int*)); // pointers to local "number of points assigned to each cluster"
double** cluster_sums_pointers = (double**)malloc(numThreads*sizeof(double*)); // pointers to local "sum of points assigned to each cluster"
while(true)
{
iteration++;
// initially, set everything to zero
for(int i = 0; i < NUM_CLUSTER; i++)
{
cluster_counts[i] = 0;
for(int j = 0; j < DIMENSION; j++)
{
cluster_sums[i*DIMENSION + j] = 0;
}
}
// cost according to the current solution
double current_cost = 0.0;
#pragma omp parallel reduction(+: current_cost)
{
int tid = omp_get_thread_num();
int local_cluster_counts[NUM_CLUSTER]; // local "number of points assigned to each cluster"
double local_cluster_sums[DIMENSION*NUM_CLUSTER]; // local "sum of points assigned to each cluster"
for(int i = 0; i < NUM_CLUSTER; i++)
{
local_cluster_counts[i] = 0;
for(int j = 0; j < DIMENSION; j++)
{
local_cluster_sums[i*DIMENSION + j] = 0;
}
}
cluster_counts_pointers[tid] = local_cluster_counts; // set the pointer
cluster_sums_pointers[tid] = local_cluster_sums; // set the pointer
int index;
double min_dist;
double current_dist;
// assign each point to their cluster center in parallel.
// update the cost of current solution and keep updating local counts and sums
#pragma omp for schedule(static)
for (int i = 0; i < NUM_POINTS; i++)
{
index = 0;
min_dist = DBL_MAX;
current_dist = 0;
for(int j = 0; j < NUM_CLUSTER; j++)
{
current_dist = distance(data + i*DIMENSION, centers + j*DIMENSION);
if(current_dist < min_dist)
{
index = j;
min_dist = current_dist;
}
}
current_cost += min_dist;
local_cluster_counts[index] += 1;
for(int j = 0; j < DIMENSION; j++)
{
local_cluster_sums[index*DIMENSION + j] = local_cluster_sums[index*DIMENSION + j] + data[i*DIMENSION + j];
}
}
// aggregate counts and sums across all threads
#pragma omp for schedule(static)
for(int i = 0; i < NUM_CLUSTER; i++)
{
for(int j = 0; j < numThreads; j++)
{
cluster_counts[i] = cluster_counts[i] + cluster_counts_pointers[j][i];
for(int k = 0; k < DIMENSION; k++)
{
cluster_sums[i*DIMENSION + k] = cluster_sums[i*DIMENSION + k] + cluster_sums_pointers[j][i*DIMENSION + k];
}
}
}
}
if(iteration == 1)
{
initCost[runNum] = current_cost;
}
// now scale all the sums by the number of points at each cluster
for(int i = 0; i < NUM_CLUSTER; i++)
{
int scaler = cluster_counts[i];
for(int j = 0; j < DIMENSION; j++)
{
centers[i*DIMENSION + j] = cluster_sums[i*DIMENSION + j]/scaler;
}
}
// log entry
fprintf(logger,"Iteration: %d Cost:%f\n",iteration,current_cost);
// termination criteria
if(1 - current_cost/prev_cost < 0.0001)
{
prev_cost = current_cost;
break;
}
prev_cost = current_cost;
}
gettimeofday(&end,NULL);
finalCost[runNum] = prev_cost;
numIter[runNum] = iteration;
iterTime[runNum] = get_time_diff(start,end)/numIter[runNum];
totalTime[runNum] = iterTime[runNum]*numIter[runNum] + initTime[runNum];
fprintf(logger, "Number of iterations:%f\n",numIter[runNum]);
fprintf(logger, "Initialization time:%f\n",initTime[runNum]);
fprintf(logger, "Initialization cost:%f\n",initCost[runNum]);
fprintf(logger, "Final cost:%f\n",finalCost[runNum]);
fprintf(logger, "Total time:%f\n",totalTime[runNum]);
fprintf(logger, "Per iteration time:%f\n",iterTime[runNum]);
fprintf(logger, "Total iteration time:%f\n",iterTime[runNum]*numIter[runNum]);
if(method == 2) // d2-seeding
{
fprintf(logger,"samplingTime_1:%f\n",mean(samplingTime_1,NUM_CLUSTER));
fprintf(logger,"samplingTime_2:%f\n",mean(samplingTime_2,NUM_CLUSTER));
}
fclose(logger);
free(cluster_counts);
free(cluster_sums);
free(cluster_counts_pointers);
free(cluster_sums_pointers);
free(centers);
cudaFreeHost(distances); // free this way when using page-locked memory for distances
// free(distances);
free(rnd);
free(multiset);
free(partition_sums);
}
logger = fopen(resultFile,"w");
fprintf(logger, "Initial cost: %f %f\n",mean(initCost,numRuns),sd(initCost,numRuns));
fprintf(logger, "Final cost: %f %f\n",mean(finalCost,numRuns),sd(finalCost,numRuns));
fprintf(logger, "Number of iterations: %f %f\n",mean(numIter,numRuns),sd(numIter,numRuns));
fprintf(logger, "Initialization time: %f %f\n",mean(initTime,numRuns),sd(initTime,numRuns));
fprintf(logger, "Per iteration time: %f %f\n",mean(iterTime,numRuns),sd(iterTime,numRuns));
fclose(logger);
return 0;
}
// Binary search over a cumulative-weight array: returns the smallest index in
// [startIndex, endIndex) whose cumulative value is >= prob.
// The cumulative values restart from 0 at startIndex (per-group cumulative),
// so probabilities[startIndex-1] — if it exists at all — belongs to a
// different group and must never be consulted.
//
// Fixes over the original:
//  - mid is initialized, so an empty range (endIndex <= startIndex) returns
//    startIndex instead of an uninitialized value (UB).
//  - the probabilities[mid-1] read is guarded by mid > startIndex; the
//    original read out of bounds when startIndex == 0 and could take the
//    wrong branch when startIndex > 0 (it compared against the previous
//    group's cumulative total).
int sample_from_distribution(double* probabilities, int startIndex, int endIndex, double prob)
{
    int start = startIndex, end = endIndex - 1;
    int mid = startIndex;
    while (start <= end)
    {
        mid = (start + end) / 2;
        if (mid > startIndex && prob < probabilities[mid - 1])
        {
            // target mass lies strictly before mid
            end = mid - 1;
        }
        else if (prob > probabilities[mid])
        {
            // target mass lies strictly after mid
            start = mid + 1;
        }
        else
        {
            // probabilities[mid-1] <= prob <= probabilities[mid]
            break;
        }
    }
    return mid;
}
// GPU version of sampling code
// Two-level GPU sampling: each thread draws one of dev_N samples.
// Step 1: binary-search the cumulative per-partition totals
//         (dev_partition_sums) to choose a partition.
// Step 2: binary-search that partition's cumulative weights (dev_distances,
//         cumulative per partition) to choose a point index.
// dev_rnd supplies two uniform [0,1) draws per sample.
// NOTE(review): both searches read index mid-1, which is out of bounds when
// mid == 0 (first search) or mid == partition start (second search) — the
// host-side sample_from_distribution has the same issue; confirm and guard.
__global__ void sample_from_distribution_gpu(double* dev_partition_sums, double* dev_distances, int* dev_sampled_indices, double* dev_rnd,int per_thread, int dev_NUM_POINTS, int dev_N)
{
    // number of partitions that actually hold points
    int numValidPartitions = dev_NUM_POINTS/per_thread + 1;
    int start,mid,end,groupNo,pointIndex;
    double prob;
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( i < dev_N)
    {
        // first pick a block from the local_sums distribution
        // int groupNo =sample_from_distribution(partition_sums,0, numValidPartitions, rnd[2*i]*partition_sums[numValidPartitions-1]);
        start = 0;
        end = numValidPartitions - 1;
        // scale the first random draw by the total mass across partitions
        prob = dev_rnd[2*i]*dev_partition_sums[end];
        while(start <= end)
        {
            mid = (start+end)/2;
            if(prob < dev_partition_sums[mid-1])
            {
                end = mid-1;
            }
            else if(prob > dev_partition_sums[mid])
            {
                start = mid+1;
            }
            else
            {
                break;
            }
        }
        groupNo = mid;
        // the start and end index of this block
        // int startIndex = groupNo*per_thread;
        // int endIndex = min((groupNo + 1)*per_thread, NUM_POINTS);
        // now sample from the cumulative distribution of the block
        // int pointIndex = sample_from_distribution(distances, startIndex, endIndex, rnd[2*i+1]*distances[endIndex-1]);
        start = groupNo*per_thread;
        // NOTE(review): uses the NUM_POINTS macro instead of the
        // dev_NUM_POINTS parameter — confirm the two always agree
        end = min((groupNo + 1)*per_thread, NUM_POINTS) - 1;
        // scale the second random draw by the chosen partition's total mass
        prob = dev_rnd[2*i+1]*dev_distances[end];
        while(start <= end)
        {
            mid = (start+end)/2;
            if(prob < dev_distances[mid-1])
            {
                end = mid-1;
            }
            else if(prob > dev_distances[mid])
            {
                start = mid+1;
            }
            else
            {
                break;
            }
        }
        pointIndex = mid;
        dev_sampled_indices[i] = pointIndex;
    }
}
// Sampling for case of strided memory access pattern, no dev_partition_sums here
// Single-level GPU sampling for the strided-access variant: dev_distances is
// cumulative over the WHOLE array (no per-partition sums), so each of the
// dev_N sampling threads performs one binary search and records the chosen
// point index in dev_sampled_indices.
__global__ void sample_from_distribution_gpu_strided(double* dev_distances, int* dev_sampled_indices, double* dev_rnd, int dev_NUM_POINTS, int dev_N)
{
    int sampleId = blockIdx.x*blockDim.x + threadIdx.x;
    if (sampleId < dev_N)
    {
        int lo = 0;
        int hi = dev_NUM_POINTS - 1;
        // scale the uniform random draw by the total cumulative mass
        double target = dev_rnd[sampleId]*dev_distances[hi];
        // pre-set in case the loop body never runs
        int probe = (lo + hi)/2;
        while (lo <= hi)
        {
            probe = (lo + hi)/2;
            if (target < dev_distances[probe-1])
            {
                hi = probe - 1;
            }
            else if (target > dev_distances[probe])
            {
                lo = probe + 1;
            }
            else
            {
                break;
            }
        }
        dev_sampled_indices[sampleId] = probe;
    }
}
// This function calculates required distance for all points and partitions
// Need to do an all-prefix sum after this to make this thing cumulative
// Can be optimized by using distances calculated in previous iteration, i.e. when the previous center was sampled
// This does not do any sampling business
// Need not call this function when centerIter = 0,
// Not optimized to use distance calculted in previous iteration to calculate distance/cost for points
// For each point in this thread's contiguous partition, computes the sampling
// weight w.r.t. all centerIter centers chosen so far, stores it cumulatively
// (per partition) in dev_distances, and writes the partition total to
// dev_partition_sums[tid]. This variant recomputes against every center
// each call (no reuse of the previous iteration's result).
__global__ void comp_dist_2(double* dev_data,double* dev_distances,double* dev_partition_sums, double* dev_centers,int centerIter,int numPoints,int dev_dimension,int numGPUThreads)
{
    // Starting off with very simplistic 1-D threads blocks and 1-D grids
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    // int jump = blockDim.x*gridDim.x;
    int per_thread = (numPoints + numGPUThreads - 1)/numGPUThreads;// Term in the numerator is added to that we can get ceiling of numPoints/numGPUThreads
    int startIndex = tid*per_thread;
    int endIndex = min((tid + 1)*per_thread,numPoints);
    double min_dist = DBL_MAX, local_dist,temp,prev_val = 0;
    for (int dataIndex = startIndex; dataIndex < endIndex; ++dataIndex)
    {
        min_dist = DBL_MAX;
        // minimum squared distance over all centers chosen so far
        for (int i = 0; i < centerIter; ++i)
        {
            local_dist = 0;
            for (int j = 0; j < dev_dimension; ++j)
            {
                temp = dev_data[dataIndex*dev_dimension + j] - dev_centers[i*dev_dimension + j];
                local_dist += temp*temp;
            }
            min_dist = min(min_dist,local_dist);
        }
        // NOTE(review): min_dist is already a squared distance, so the stored
        // weight is dist^4; this matches the CPU path (d2_sample) but confirm
        // that dist^4 weighting is intended
        dev_distances[dataIndex] = min_dist*min_dist + prev_val;
        // dev_distances[dataIndex] = min_dist*min_dist;
        prev_val = dev_distances[dataIndex];
    }
    // total weight of this thread's partition, used for two-level sampling
    dev_partition_sums[tid] = prev_val;
}
// Optimised to use previous distance values to calculate min_dist for points in next iteration
// Incremental version of comp_dist_2: for centerIter > 1 it recovers each
// point's previous weight from the cumulative array written by the last call
// and only measures the point against the newest center, taking the minimum.
// Output layout is the same: dev_distances cumulative per partition,
// dev_partition_sums[tid] = partition total.
__global__ void comp_dist(double* dev_data,double* dev_distances,double* dev_partition_sums, double* dev_centers,int centerIter,int numPoints,int dev_dimension,int numGPUThreads)
{
    // Starting off with very simplistic 1-D threads blocks and 1-D grids
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    int per_thread = (numPoints + numGPUThreads - 1)/numGPUThreads;// Term in the numerator is added to that we can get ceiling of numPoints/numGPUThreads
    int startIndex = tid*per_thread;
    int endIndex = min((tid + 1)*per_thread,numPoints);
    // prev_val: running cumulative of the NEW weights being written;
    // old_prev_val: cumulative of the OLD values being overwritten, needed to
    // recover each point's previous (non-cumulative) weight before it is lost
    double min_dist = DBL_MAX, local_dist,temp,prev_val = 0,old_prev_val=0;
    for (int dataIndex = startIndex; dataIndex < endIndex; ++dataIndex)
    {
        if (centerIter == 1) // This is the first time dev_distances will get its values
        {
            min_dist = 0;
            int i = 0;
            for (int j = 0; j < dev_dimension; ++j)
            {
                temp = dev_data[dataIndex*dev_dimension + j] - dev_centers[i*dev_dimension + j];
                min_dist += temp*temp;
            }
            dev_distances[dataIndex] = min_dist*min_dist + prev_val; // make it cumulative as you calculate it
            prev_val = dev_distances[dataIndex];
        }
        else
        {
            int i = centerIter - 1; // i denotes the last center that was added to the list of centers
            // previous weight of this point = old cumulative value minus the
            // old cumulative value of the preceding point
            min_dist = dev_distances[dataIndex] - old_prev_val;
            old_prev_val= dev_distances[dataIndex];
            local_dist = 0;
            for (int j = 0; j < dev_dimension; ++j)
            {
                temp = dev_data[dataIndex*dev_dimension + j] - dev_centers[i*dev_dimension + j];
                local_dist += temp*temp;
            }
            min_dist = min(min_dist,local_dist*local_dist);
            dev_distances[dataIndex] = min_dist + prev_val; // No need to square min_dist here, it is already squared value
            prev_val = dev_distances[dataIndex];
        }
    }
    // total weight of this thread's partition
    dev_partition_sums[tid] = prev_val;
}
// Optimised to use previous distance values to calculate min_dist for points in next iteration
// Also makes use of constant memory for storing centers
// Same incremental algorithm as comp_dist, but the centers are read from the
// __constant__-memory array dev_centers_global (filled from the host via
// cudaMemcpyToSymbol) instead of a global-memory pointer argument.
__global__ void comp_dist_glbl(double* dev_data,double* dev_distances,double* dev_partition_sums,int centerIter,int numPoints,int dev_dimension,int numGPUThreads)
{
    // Starting off with very simplistic 1-D threads blocks and 1-D grids
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    int per_thread = (numPoints + numGPUThreads - 1)/numGPUThreads;// Term in the numerator is added to that we can get ceiling of numPoints/numGPUThreads
    int startIndex = tid*per_thread;
    int endIndex = min((tid + 1)*per_thread,numPoints);
    // prev_val tracks the NEW cumulative being written; old_prev_val tracks
    // the OLD cumulative being read so each point's previous weight can be
    // recovered before it is overwritten
    double min_dist = DBL_MAX, local_dist,temp,prev_val = 0,old_prev_val=0;
    for (int dataIndex = startIndex; dataIndex < endIndex; ++dataIndex)
    {
        if (centerIter == 1) // This is the first time dev_distances will get its values
        {
            min_dist = 0;
            int i = 0;
            for (int j = 0; j < dev_dimension; ++j)
            {
                temp = dev_data[dataIndex*dev_dimension + j] - dev_centers_global[i*dev_dimension + j];
                min_dist += temp*temp;
            }
            dev_distances[dataIndex] = min_dist*min_dist + prev_val; // make it cumulative as you calculate it
            prev_val = dev_distances[dataIndex];
        }
        else
        {
            int i = centerIter - 1; // i denotes the last center that was added to the list of centers
            min_dist = dev_distances[dataIndex] - old_prev_val;
            old_prev_val= dev_distances[dataIndex];
            local_dist = 0;
            for (int j = 0; j < dev_dimension; ++j)
            {
                temp = dev_data[dataIndex*dev_dimension + j] - dev_centers_global[i*dev_dimension + j];
                local_dist += temp*temp;
            }
            min_dist = min(min_dist,local_dist*local_dist);
            dev_distances[dataIndex] = min_dist + prev_val; // No need to square min_dist here, it is already squared value
            prev_val = dev_distances[dataIndex];
        }
    }
    // total weight of this thread's partition
    dev_partition_sums[tid] = prev_val;
}
// This additionally does things in strided fashion as opposed to assigning each thread some fixed block
// Strided variant: threads sweep the whole point array with a grid-stride
// loop instead of owning a fixed contiguous partition, and weights are stored
// NON-cumulatively (a separate prefix-sum pass makes dev_distances cumulative
// afterwards). dev_partition_sums is accepted for signature parity but never
// written here. Centers come from the __constant__ array dev_centers_global.
__global__ void comp_dist_glbl_strided(double* dev_data,double* dev_distances,double* dev_partition_sums,int centerIter,int numPoints,int dev_dimension,int numGPUThreads)
{
    int dataIndex = threadIdx.x + blockIdx.x*blockDim.x;
    int stride = blockDim.x*gridDim.x; // grid-stride step
    double min_dist, local_dist, temp;
    while(dataIndex < numPoints)
    {
        if (centerIter == 1) // This is the first time dev_distances will get its values
        {
            min_dist = 0;
            for (int j = 0; j < dev_dimension; ++j)
            {
                temp = dev_data[dataIndex*dev_dimension + j] - dev_centers_global[j]; // Accessing 0th center of dev_center_global
                min_dist += temp*temp;
            }
            // squared again like the other kernels (weight = dist^4), stored
            // non-cumulatively
            dev_distances[dataIndex] = min_dist*min_dist;
        }
        else
        {
            // Assuming that dev_distances has been made cumulative, after this function call
            // if (dataIndex == 0)
            // {
            // min_dist = dev_distances[dataIndex];
            // }
            // else
            // {
            // min_dist = dev_distances[dataIndex] - dev_distances[dataIndex - 1];
            // }
            // no reuse of the previous iteration here: recompute the minimum
            // against every center chosen so far
            min_dist = DBL_MAX;
            for (int i = 0; i < centerIter; ++i)
            {
                local_dist = 0;
                for (int j = 0; j < dev_dimension; ++j)
                {
                    temp = dev_data[dataIndex*dev_dimension + j] - dev_centers_global[i*dev_dimension + j];
                    local_dist += temp*temp;
                }
                min_dist = min(min_dist,local_dist*local_dist);
            }
            // min_dist was already squared via local_dist*local_dist above;
            // stored non-cumulatively
            dev_distances[dataIndex] = min_dist;
        }
        dataIndex += stride;
    }
}
// generate numSamples sized multiset from weighted data with weights wrt. centers where the current size of centers is size
// numPts : number of points in data
// numSamples: number of points to sample
// size : size of centers i.e. number of centers chosen already
// Draws a numSamples-sized multiset from data, weighted by each point's cost
// w.r.t. the first `size` entries of centers (D2-style sampling). When
// size == 0 every point gets weight 1 (uniform sampling).
// Parallelised with OpenMP: each thread builds a per-block cumulative weight
// array plus one block total, so every sample needs only two binary searches
// (pick a block, then a point within it).
// Returns a freshly malloc'd numSamples x DIMENSION array; caller frees it.
// Fix over the original: the temporary rnd buffer is now freed (it leaked on
// every call, and this function is called once per sampled center).
double* d2_sample(double* data,double* centers,int numPts, int numSamples, int size)
{
    // cumulative probability for each group of points
    // the distances are cumulative only for a group. So, [0,...,numPts/numThreads], [numPts/numThreads+1,...,numPts*2/numThreads],... and so on. These groups contain cumulative distances.
    double* distances = (double*)malloc(numPts*sizeof(double));
    double* local_sums = (double*)malloc(numThreads*sizeof(double)); // local sums. first is sum for [0...numPts/numThreads-1], and so on. This is also a cumulative distribution.
    double* result = (double*)malloc(numSamples*DIMENSION*sizeof(double));
    for (int i = 0; i < numSamples; ++i)
    {
        for (int j = 0; j < DIMENSION; ++j)
        {
            result[i*DIMENSION + j] = 0;
        }
    }
    // we're gonna need 2*numSamples random numbers: one pair per sample
    double* rnd = (double*)malloc(2*numSamples*sizeof(double));
    int i;
    for(i = 0; i < 2*numSamples; i++){
        rnd[i] = ((double) rand())/RAND_MAX;
    }
    #pragma omp parallel
    {
        struct timeval start,end;
        gettimeofday(&start,NULL);
        // create blocks of data
        int tid = omp_get_thread_num();
        int per_thread = (numPts + numThreads - 1) / numThreads;
        int lower = tid * per_thread;
        int higher = (tid + 1) * per_thread;
        if(tid == numThreads - 1) higher = numPts; // last thread takes the remainder
        int block_size = higher - lower;
        double min_dist, local_dist;
        double* p;
        double prev_val = 0;
        // cost of each block
        double local_sum = 0;
        int center_size = size;
        int i,j;
        for(i = 0;i < block_size;i++)
        {
            if(center_size == 0){
                // no centers yet: uniform weight of 1 per point
                local_sum += 1;
                distances[lower+i] = 1 + prev_val;
            } else{
                p = data + (lower+i)*DIMENSION;
                min_dist = distance(p,centers);
                for (j = 1; j < center_size; j++) {
                    local_dist = distance(p, centers + j*DIMENSION);
                    min_dist = min(min_dist, local_dist); // calculating minimum distances
                }
                // NOTE(review): distance() already returns the squared
                // Euclidean distance, so the sampling weight here is dist^4 —
                // consistent with the GPU kernels, but confirm it is intended
                local_sum += min_dist * min_dist;
                distances[lower+i] = min_dist * min_dist + prev_val; // make cumulative
            }
            prev_val = distances[lower+i];
        }
        local_sums[tid] = local_sum;
        #pragma omp barrier // everyone is here now
        #pragma omp master
        {
            for(int i=1;i<numThreads;i++){
                local_sums[i] = local_sums[i] + local_sums[i-1]; // make cumulative
            }
            // printf("Number of threads::%d\n",omp_get_num_threads());
        }
        gettimeofday(&end,NULL);
        float cost_time = get_time_diff(start,end);
        #pragma omp barrier
        gettimeofday(&start,NULL);
        #pragma omp for
        for(int i = 0;i < numSamples;i++){
            // first pick a block from the local_sums distribution
            int groupNo = sample_from_distribution(local_sums, 0, numThreads, rnd[i*2]*local_sums[numThreads-1]);
            // the start and end index of this block
            int startIndex = groupNo * per_thread;
            int endIndex = (groupNo + 1) * per_thread;
            if(groupNo == numThreads - 1) endIndex = numPts;
            // now sample from the cumulative distribution of the block
            int pointIndex = sample_from_distribution(distances, startIndex, endIndex, rnd[2*i+1]*distances[endIndex-1]);
            for (int j = 0; j < DIMENSION; ++j)
            {
                result[i*DIMENSION + j] = data[pointIndex*DIMENSION + j];
            }
            // memcpy(result + i*DIMENSION, data + pointIndex*DIMENSION, DIMENSION*sizeof(double));
        }
        gettimeofday(&end,NULL);
        float sample_time = get_time_diff(start,end);
        // cost_time / sample_time exist only for the ad-hoc profiling prints
        // below; otherwise unused
        // if (center_size >= 99)
        // {
        //     printf("Cost computation time ::%f\n",cost_time);
        //     printf("Sampling time ::%f\n",sample_time);
        // }
    }
    free(rnd); // original leaked this buffer on every call
    free(distances);
    free(local_sums);
    return result;
}
// This version of d2_sample has optimized cost calculation by using cost computed in last iteration
// Incremental variant of d2_sample: the caller owns the `distances` buffer,
// which persists between calls, so for size > 1 each point's weight from the
// previous call is reused and only the newest center is measured.
// Returns a freshly malloc'd numSamples x DIMENSION array; caller frees it.
// Fixes over the original: the temporary rnd buffer is now freed (it leaked
// on every call), and a redundant duplicate prev_val assignment was removed.
double* d2_sample_2(double* data,double* centers,int numPts, int numSamples, int size, double* distances)
{
    // cumulative probability for each group of points
    // the distances are cumulative only for a group. So, [0,...,numPts/numThreads], [numPts/numThreads+1,...,numPts*2/numThreads],... and so on. These groups contain cumulative distances.
    double* local_sums = (double*)malloc(numThreads*sizeof(double)); // local sums. first is sum for [0...numPts/numThreads-1], and so on. This is also a cumulative distribution.
    double* result = (double*)malloc(numSamples*DIMENSION*sizeof(double));
    for (int i = 0; i < numSamples; ++i)
    {
        for (int j = 0; j < DIMENSION; ++j)
        {
            result[i*DIMENSION + j] = 0;
        }
    }
    // we're gonna need 2*numSamples random numbers: one pair per sample
    double* rnd = (double*)malloc(2*numSamples*sizeof(double));
    int i;
    for(i = 0; i < 2*numSamples; i++){
        rnd[i] = ((double) rand())/RAND_MAX;
    }
    #pragma omp parallel
    {
        struct timeval start,end;
        gettimeofday(&start,NULL);
        // create blocks of data
        int tid = omp_get_thread_num();
        int per_thread = (numPts + numThreads - 1) / numThreads;
        int lower = tid * per_thread;
        int higher = (tid + 1) * per_thread;
        if(tid == numThreads - 1) higher = numPts; // last thread takes the remainder
        int block_size = higher - lower;
        double min_dist, local_dist;
        double* p;
        // prev_val: cumulative of NEW weights; old_prev_val: cumulative of
        // OLD weights being overwritten, needed to recover each point's
        // previous (non-cumulative) weight
        double prev_val = 0, old_prev_val = 0;
        // cost of each block
        double local_sum = 0;
        int center_size = size;
        int i;
        for(i = 0;i < block_size;i++)
        {
            if(center_size == 0)
            {
                // no centers yet: uniform weight of 1 per point
                local_sum += 1;
                distances[lower+i] = 1 + prev_val;
            }
            else if (center_size == 1)
            {
                // first real pass: distances[] has no usable previous values
                p = data + (lower+i)*DIMENSION;
                min_dist = distance(p,centers);
                local_sum += min_dist * min_dist;
                distances[lower+i] = min_dist * min_dist + prev_val; // make cumulative
            }
            else
            {
                p = data + (lower+i)*DIMENSION;
                // previous weight = old cumulative minus old cumulative of
                // the preceding point
                min_dist = distances[lower+i] - old_prev_val;
                old_prev_val = distances[lower+i]; // Important for it to have old value of distance[lower+i] so that min_dist is correct in next iteration
                local_dist = distance(p,centers + (center_size-1)*DIMENSION); // Find distance wrt last added new center;
                local_dist = local_dist*local_dist;
                min_dist = min(min_dist,local_dist);
                local_sum += min_dist; // min_dist is already squared here because it is calculated usign cumulative distance
                distances[lower+i] = min_dist + prev_val; // make cumulative
            }
            // common cumulative update for all branches (the original also
            // assigned this inside the else branch, redundantly)
            prev_val = distances[lower+i];
        }
        local_sums[tid] = local_sum;
        #pragma omp barrier // everyone is here now
        #pragma omp master
        {
            for(int i=1;i<numThreads;i++){
                local_sums[i] = local_sums[i] + local_sums[i-1]; // make cumulative
            }
        }
        gettimeofday(&end,NULL);
        float cost_time = get_time_diff(start,end);
        #pragma omp barrier
        gettimeofday(&start,NULL);
        #pragma omp for
        for(int i = 0;i < numSamples;i++){
            // first pick a block from the local_sums distribution
            int groupNo = sample_from_distribution(local_sums, 0, numThreads, rnd[i*2]*local_sums[numThreads-1]);
            // the start and end index of this block
            int startIndex = groupNo * per_thread;
            int endIndex = (groupNo + 1) * per_thread;
            if(groupNo == numThreads - 1) endIndex = numPts;
            // now sample from the cumulative distribution of the block
            int pointIndex = sample_from_distribution(distances, startIndex, endIndex, rnd[2*i+1]*distances[endIndex-1]);
            for (int j = 0; j < DIMENSION; ++j)
            {
                result[i*DIMENSION + j] = data[pointIndex*DIMENSION + j];
            }
            // memcpy(result + i*DIMENSION, data + pointIndex*DIMENSION, DIMENSION*sizeof(double));
        }
        gettimeofday(&end,NULL);
        float sample_time = get_time_diff(start,end);
        // cost_time / sample_time exist only for the ad-hoc profiling prints
        // below; otherwise unused
        // if (center_size >= 99)
        // {
        //     printf("Cost computation time ::%f\n",cost_time);
        //     printf("Sampling time ::%f\n",sample_time);
        // }
    }
    free(rnd); // original leaked this buffer on every call
    free(local_sums);
    return result;
}
// Picks the next center from a D2-sampled multiset: runs a kmeans++-style
// seeding (d2_sample, one point per round) to get NUM_CLUSTER provisional
// centers, assigns every multiset point to its nearest provisional center in
// parallel, and returns the centroid of the most populated cluster.
// Returns a freshly malloc'd DIMENSION-sized buffer; caller owns it.
// Fixes over the original:
//  - the original freed cluster_means and then returned a pointer INTO it
//    (use-after-free); the result is now copied out first.
//  - the per-round `point` buffer from d2_sample and level_2_sample were
//    leaked; both are now freed.
double* mean_heuristic(double* multiset,int multisetSize)
{
    // first do a kmeans++ initialiation on the multiset
    int i,j;
    double* level_2_sample = (double*)malloc(NUM_CLUSTER*DIMENSION*sizeof(double));
    for(i = 0; i < NUM_CLUSTER; i++)
    {
        // sample one point proportionally to its cost w.r.t. the i centers
        // chosen so far (i == 0 means uniform)
        double* point = d2_sample(multiset,level_2_sample,multisetSize,1,i);
        for (j = 0; j < DIMENSION; ++j)
        {
            level_2_sample[i*DIMENSION + j] = point[j];
        }
        free(point); // d2_sample mallocs its result; original leaked it
    }
    int* counts = (int*)malloc(NUM_CLUSTER*sizeof(int)); // number of points assigned to each kmeans++ center
    double* cluster_means = (double*)malloc(NUM_CLUSTER*DIMENSION*sizeof(double)); // for taking the centroid later on. We maintain a sum of all points assigned to a center here.
    for (i = 0; i < NUM_CLUSTER; i++)
    {
        counts[i] = 0;
        for(j = 0; j< DIMENSION; j++)
        {
            cluster_means[i*DIMENSION + j] = 0;
        }
    }
    // here the heuristic does things in a parallel fashion
    // maintain a local structure for each thread to keep track of cluster sums and counts
    int** local_tmp_counts = (int**)malloc(numThreads*sizeof(int*));
    double** local_tmp_cluster_means = (double**)malloc(numThreads*sizeof(double*));
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int* local_counts = (int*)malloc(NUM_CLUSTER*sizeof(int));
        double* local_cluster_means = (double*)malloc(NUM_CLUSTER*DIMENSION*sizeof(double));
        for (int i = 0; i < NUM_CLUSTER; i++)
        {
            local_counts[i] = 0;
            for(int j = 0; j < DIMENSION; j++)
            {
                local_cluster_means[i*DIMENSION + j] = 0;
            }
        }
        local_tmp_counts[tid] = local_counts; // save the pointers to local data structures
        local_tmp_cluster_means[tid] = local_cluster_means;
        double min_dist, tmp_dist;
        int index;
        #pragma omp for schedule(static)
        for (int i = 0; i < multisetSize; i++)
        {
            // find the nearest provisional (kmeans++) center for point i
            min_dist = distance(level_2_sample,multiset + i*DIMENSION);
            index = 0;
            for (int j = 1; j < NUM_CLUSTER; j++)
            {
                tmp_dist = distance(level_2_sample + j*DIMENSION, multiset+ i*DIMENSION);
                if (tmp_dist < min_dist)
                {
                    min_dist = tmp_dist;
                    index = j;
                }
            }
            for(int j = 0; j < DIMENSION; j++)
            {
                local_cluster_means[index*DIMENSION + j] += multiset[i*DIMENSION + j];
            }
            local_counts[index]++;
        }
        // aggregate across all threads; the implicit barrier at the end of
        // the previous omp for guarantees every thread's locals are complete
        #pragma omp for schedule(static)
        for (int i = 0; i < NUM_CLUSTER; i++)
        {
            for (int p = 0; p < numThreads ; p++)
            {
                for(int j = 0; j < DIMENSION; j++)
                {
                    cluster_means[i*DIMENSION + j] += local_tmp_cluster_means[p][i*DIMENSION + j];
                }
                counts[i] += local_tmp_counts[p][i];
            }
        }
        // safe to free only after the aggregation loop's implicit barrier
        free(local_counts);
        free(local_cluster_means);
    }
    // locate the most populated cluster
    int max = counts[0];
    int index = 0;
    for (int i = 1; i < NUM_CLUSTER; i++)
    {
        if (counts[i] > max)
        {
            max = counts[i];
            index = i; // largest cluster with maximum points from sampled_set assigned to it.
        }
    }
    // do the scaling to find the mean
    for(int i = 0; i < DIMENSION; i++){
        cluster_means[index*DIMENSION + i] /= counts[index];
    }
    // copy the winning mean out BEFORE freeing its backing storage; the
    // original freed cluster_means and then returned a pointer into it
    double* nextCenter = (double*)malloc(DIMENSION*sizeof(double));
    memcpy(nextCenter, cluster_means + index*DIMENSION, DIMENSION*sizeof(double));
    free(counts);
    free(cluster_means);
    free(local_tmp_counts);
    free(local_tmp_cluster_means);
    free(level_2_sample); // also leaked by the original
    return nextCenter;
}
// Squared Euclidean distance between two DIMENSION-dimensional points.
// Note: returns the SQUARED distance (no square root is taken).
double distance(double* p1, double* p2)
{
    double acc = 0;
    for (int d = 0; d < DIMENSION; ++d)
    {
        double diff = p1[d] - p2[d];
        acc += diff * diff;
    }
    return acc;
}
// Writes the NUM_CLUSTER x DIMENSION center matrix to "dataExchange.txt",
// one center per line, values space-separated.
// Fixes over the original: fopen failure is now reported instead of causing
// a NULL-pointer write, and the FILE handle is closed (it leaked before, so
// buffered output could also be lost at process exit).
void write_centers_to_file(double* centers)
{
    FILE* writer = fopen("dataExchange.txt","w");
    if (writer == NULL)
    {
        fprintf(stderr, "write_centers_to_file: could not open dataExchange.txt\n");
        return;
    }
    for (int i = 0; i < NUM_CLUSTER; ++i)
    {
        for (int j = 0; j < DIMENSION; ++j)
        {
            fprintf(writer, "%f ",centers[i*DIMENSION + j] );
        }
        fprintf(writer, "\n");
    }
    fclose(writer);
}
// Arithmetic mean of the first n entries of a, returned as float.
// NOTE(review): assumes n > 0 — n == 0 divides by zero.
static inline float mean(double* a, int n)
{
    double total = 0;
    int i;
    for (i = 0; i < n; ++i)
        total += a[i];
    return total / n;
}
// Population standard deviation (divides by n, not n-1) of the first n
// entries of a, returned as float.
// NOTE(review): assumes n > 0 — n == 0 divides by zero.
static inline float sd(double* a, int n)
{
    double total = 0;
    int i;
    for (i = 0; i < n; ++i)
        total += a[i];
    double avg = total / n;
    double sqDev = 0;
    for (i = 0; i < n; ++i)
        sqDev += (a[i] - avg) * (a[i] - avg);
    return sqrt(sqDev / n);
}
// Elapsed wall-clock time from t1 to t2, in seconds (microsecond precision).
static inline double get_time_diff(struct timeval t1, struct timeval t2){
    double seconds = (double)(t2.tv_sec - t1.tv_sec);
    double microseconds = (double)(t2.tv_usec - t1.tv_usec);
    return seconds + microseconds * 1e-6;
}
|
d7353b640ce68381eeb793f8460760b940378c21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
#ifndef ENABLE_SHUFFLE
// Per-atom staging record kept in shared memory on devices that cannot use
// warp shuffles to exchange data between lanes.
typedef struct {
    real x, y, z;       // position
    real q;             // charge
    real fx, fy, fz;    // accumulated force components
    ATOM_PARAMETER_DATA // extra per-atom parameter fields spliced in by the host
#ifndef PARAMETER_SIZE_IS_EVEN
    // NOTE(review): presumably keeps the struct an even number of reals for
    // shared-memory layout reasons — confirm intent
    real padding;
#endif
} AtomData;
#endif
#ifdef ENABLE_SHUFFLE
//support for 64 bit shuffles
// Warp shuffle for 32-bit floats: forwards directly to the SHFL macro,
// since a float fits in a single shuffle operation.
static __inline__ __device__ float real_shfl(float var, int srcLane) {
    return SHFL(var, srcLane);
}
// Warp shuffle for doubles: a 64-bit value cannot be shuffled in one op, so
// it is split into two 32-bit words, each shuffled, then reassembled.
static __inline__ __device__ double real_shfl(double var, int srcLane) {
    int hi, lo;
    // split the 64-bit double into its low and high 32-bit words
    asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
    hi = SHFL(hi, srcLane);
    lo = SHFL(lo, srcLane);
    return __hiloint2double( hi, lo );
}
// Warp shuffle for 64-bit integers (e.g. fixed-point force accumulators):
// split into two 32-bit words, shuffle each, then rebuild through an int2.
static __inline__ __device__ long long real_shfl(long long var, int srcLane) {
    int hi, lo;
    // split the 64-bit integer into its low and high 32-bit words
    asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
    hi = SHFL(hi, srcLane);
    lo = SHFL(lo, srcLane);
    // unfortunately there isn't an __nv_hiloint2long(hi,lo) intrinsic cast
    int2 fuse; fuse.x = lo; fuse.y = hi;
    return *reinterpret_cast<long long*>(&fuse);
}
#endif
/**
* Compute nonbonded interactions. The kernel is separated into two parts,
* tiles with exclusions and tiles without exclusions. It relies heavily on
* implicit warp-level synchronization. A tile is defined by two atom blocks
* each of warpsize. Each warp computes a range of tiles.
*
* Tiles with exclusions compute the entire set of interactions across
* atom blocks, equal to warpsize*warpsize. In order to avoid access conflicts
* the forces are computed and accumulated diagonally in the manner shown below
* where, suppose
*
* [a-h] comprise atom block 1, [i-p] comprise atom block 2
*
* 1 denotes the first set of calculations within the warp
* 2 denotes the second set of calculations within the warp
* ... etc.
*
* threads
* 0 1 2 3 4 5 6 7
* atom1
* L a b c d e f g h
* o i 1 2 3 4 5 6 7 8
* c j 8 1 2 3 4 5 6 7
* a k 7 8 1 2 3 4 5 6
* l l 6 7 8 1 2 3 4 5
* D m 5 6 7 8 1 2 3 4
* a n 4 5 6 7 8 1 2 3
* t o 3 4 5 6 7 8 1 2
* a p 2 3 4 5 6 7 8 1
*
* Tiles without exclusions read off directly from the neighbourlist interactingAtoms
* and follows the same force accumulation method. If more there are more interactingTiles
* than the size of the neighbourlist initially allocated, the neighbourlist is rebuilt
* and the full tileset is computed. This should happen on the first step, and very rarely
* afterwards.
*
* On CUDA devices that support the shuffle intrinsic, on diagonal exclusion tiles use
* __shfl to broadcast. For all other types of tiles __shfl is used to pass around the
* forces, positions, and parameters when computing the forces.
*
* [out]forceBuffers - forces on each atom to eventually be accumulated
* [out]energyBuffer - energyBuffer to eventually be accumulated
* [in]posq - x,y,z,charge
* [in]exclusions - 1024-bit flags denoting atom-atom exclusions for each tile
* [in]exclusionTiles - x,y denotes the indices of tiles that have an exclusion
* [in]startTileIndex - index into first tile to be processed
* [in]numTileIndices - number of tiles this context is responsible for processing
* [in]int tiles - the atom block for each tile
* [in]interactionCount - total number of tiles that have an interaction
* [in]maxTiles - stores the size of the neighbourlist in case it needs
* - to be expanded
* [in]periodicBoxSize - size of the Periodic Box, last dimension (w) not used
* [in]invPeriodicBox - inverse of the periodicBoxSize, pre-computed for speed
* [in]blockCenter - the center of each block in euclidean coordinates
* [in]blockSize - size of the each block, radiating from the center
* - x is half the distance of total length
* - y is half the distance of total width
* - z is half the distance of total height
* - w is not used
* [in]interactingAtoms - a list of interactions within a given tile
*
*/
extern "C" __global__ void computeNonbonded(
unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, const tileflags* __restrict__ exclusions,
const ushort2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned int numTileIndices
#ifdef USE_CUTOFF
, const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms, unsigned int maxSinglePairs,
const int2* __restrict__ singlePairs
#endif
PARAMETER_ARGUMENTS) {
// PARAMETER_ARGUMENTS expands at code-generation time to the per-force-field
// parameter arrays; the LOAD_*/COMPUTE_INTERACTION macros below consume them.
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp
const unsigned int tbx = threadIdx.x - tgx; // block warpIndex
// Per-thread energy accumulator, folded into energyBuffer at the end.
mixed energy = 0;
INIT_DERIVATIVES
// use shared memory if the device cannot shuffle
#ifndef ENABLE_SHUFFLE
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
#endif
// First loop: process tiles that contain exclusions.
// Each warp handles an equal contiguous slice of the exclusion tile list.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real3 force = make_real3(0);
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 posq1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
#ifdef USE_EXCLUSIONS
// One tileflags word per thread: bit k masks the interaction with lane k.
tileflags excl = exclusions[pos*TILE_SIZE+tgx];
#endif
const bool hasExclusions = true;
if (x == y) {
// This tile is on the diagonal.
#ifdef ENABLE_SHUFFLE
real4 shflPosq = posq1;
#else
localData[threadIdx.x].x = posq1.x;
localData[threadIdx.x].y = posq1.y;
localData[threadIdx.x].z = posq1.z;
localData[threadIdx.x].q = posq1.w;
LOAD_LOCAL_PARAMETERS_FROM_1
#endif
// we do not need to fetch parameters from global since this is a symmetric tile
// instead we can broadcast the values using shuffle
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+j;
real4 posq2;
#ifdef ENABLE_SHUFFLE
BROADCAST_WARP_DATA
#else
posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+j;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1));
#endif
real tempEnergy = 0.0f;
// A diagonal tile visits every pair twice, so scale by 0.5 to avoid
// double counting the energy.
const real interactionScale = 0.5f;
COMPUTE_INTERACTION
energy += 0.5f*tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
force.x -= delta.x*dEdR;
force.y -= delta.y*dEdR;
force.z -= delta.z*dEdR;
#else
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#endif
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
}
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
real4 shflPosq = posq[j];
#ifdef ENABLE_SHUFFLE
real3 shflForce;
shflForce.x = 0.0f;
shflForce.y = 0.0f;
shflForce.z = 0.0f;
#else
localData[threadIdx.x].x = shflPosq.x;
localData[threadIdx.x].y = shflPosq.y;
localData[threadIdx.x].z = shflPosq.z;
localData[threadIdx.x].q = shflPosq.w;
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
#endif
DECLARE_LOCAL_PARAMETERS
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
#ifdef USE_EXCLUSIONS
// Rotate the exclusion bits so each lane starts on its own diagonal
// (matches the skewed iteration order below).
// NOTE(review): when tgx == 0 the (excl << TILE_SIZE) shift equals the
// bit width of a 32-bit tileflags, which is formally undefined behavior
// in C++ -- confirm tileflags' width and the intended semantics.
excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx));
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+tj;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1));
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
// cycles the indices
// 0 1 2 3 4 5 6 7 -> 1 2 3 4 5 6 7 0
tj = (tj + 1) & (TILE_SIZE - 1);
}
const unsigned int offset = y*TILE_SIZE + tgx;
// write results for off diagonal tiles
#ifdef INCLUDE_FORCES
// Forces accumulate in 64-bit fixed point: scale by 2^32 and add atomically
// as an integer (deterministic regardless of summation order).
#ifdef ENABLE_SHUFFLE
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (shflForce.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.z*0x100000000)));
#else
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
#endif
#endif
}
// Write results for on and off diagonal tiles
#ifdef INCLUDE_FORCES
const unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
#endif
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
const unsigned int numTiles = interactionCount[0];
if (numTiles > maxTiles)
return; // There wasn't enough memory for the neighbor list.
// NOTE(review): after the early return above, numTiles > maxTiles is always
// false here, so the first arm of both conditionals below is dead code.
int pos = (int) (numTiles > maxTiles ? startTileIndex+warp*(long long)numTileIndices/totalWarps : warp*(long long)numTiles/totalWarps);
int end = (int) (numTiles > maxTiles ? startTileIndex+(warp+1)*(long long)numTileIndices/totalWarps : (warp+1)*(long long)numTiles/totalWarps);
#else
const unsigned int numTiles = numTileIndices;
int pos = (int) (startTileIndex+warp*(long long)numTiles/totalWarps);
int end = (int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
// atomIndices can probably be shuffled as well
// but it probably wouldn't make things any faster
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
const bool hasExclusions = false;
real3 force = make_real3(0);
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
x = tiles[pos];
real4 blockSizeX = blockSize[x];
// If the block fits well inside half the box, all its atoms can be wrapped
// into one periodic image up front (fast path below).
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= MAX_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= MAX_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= MAX_CUTOFF);
#else
// Map the flat tile index back to (x, y) block coordinates of the upper
// triangle of the block-pair matrix.
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
#endif
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 posq1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
//const unsigned int localAtomIndex = threadIdx.x;
#ifdef USE_CUTOFF
unsigned int j = interactingAtoms[pos*TILE_SIZE+tgx];
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
#ifdef ENABLE_SHUFFLE
DECLARE_LOCAL_PARAMETERS
real4 shflPosq;
real3 shflForce;
shflForce.x = 0.0f;
shflForce.y = 0.0f;
shflForce.z = 0.0f;
#endif
if (j < PADDED_NUM_ATOMS) {
// Load position of atom j from global memory
#ifdef ENABLE_SHUFFLE
shflPosq = posq[j];
#else
localData[threadIdx.x].x = posq[j].x;
localData[threadIdx.x].y = posq[j].y;
localData[threadIdx.x].z = posq[j].z;
localData[threadIdx.x].q = posq[j].w;
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
#endif
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
}
else {
// Padding slot: zero the position so the interaction is harmless.
#ifdef ENABLE_SHUFFLE
shflPosq = make_real4(0, 0, 0, 0);
#else
localData[threadIdx.x].x = 0;
localData[threadIdx.x].y = 0;
localData[threadIdx.x].z = 0;
#endif
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX)
#ifdef ENABLE_SHUFFLE
APPLY_PERIODIC_TO_POS_WITH_CENTER(shflPosq, blockCenterX)
#else
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x], blockCenterX)
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS);
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS);
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
#ifdef INCLUDE_FORCES
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS) {
#ifdef ENABLE_SHUFFLE
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (shflForce.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.z*0x100000000)));
#else
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
#endif
}
#endif
}
pos++;
}
// Third loop: single pairs that aren't part of a tile.
// NOTE(review): this uses "#if USE_CUTOFF" while the rest of the kernel uses
// "#ifdef USE_CUTOFF" -- confirm USE_CUTOFF is always defined as an integer
// (e.g. 1) whenever it is defined.
#if USE_CUTOFF
const unsigned int numPairs = interactionCount[1];
if (numPairs > maxSinglePairs)
return; // There wasn't enough memory for the neighbor list.
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numPairs; i += blockDim.x*gridDim.x) {
int2 pair = singlePairs[i];
int atom1 = pair.x;
int atom2 = pair.y;
real4 posq1 = posq[atom1];
real4 posq2 = posq[atom2];
LOAD_ATOM1_PARAMETERS
// Temporarily alias atom2 to the local slot so the parameter-loading
// macros read from the right indices, then restore it.
int j = atom2;
atom2 = threadIdx.x;
DECLARE_LOCAL_PARAMETERS
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
LOAD_ATOM2_PARAMETERS
atom2 = pair.y;
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
bool hasExclusions = false;
bool isExcluded = false;
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
real3 dEdR1 = delta*dEdR;
real3 dEdR2 = -dEdR1;
#endif
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (-dEdR1.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR1.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR1.z*0x100000000)));
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (-dEdR2.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR2.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR2.z*0x100000000)));
#endif
}
#endif
#ifdef INCLUDE_ENERGY
// One energy slot per thread; the host reduces energyBuffer afterwards.
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
#endif
SAVE_DERIVATIVES
} | d7353b640ce68381eeb793f8460760b940378c21.cu | #define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
#ifndef ENABLE_SHUFFLE
// Per-atom staging record kept in shared memory when the device cannot use
// warp shuffle: position, charge, force accumulators, plus the macro-expanded
// per-force-field parameter fields.
typedef struct {
real x, y, z;
real q;
real fx, fy, fz;
ATOM_PARAMETER_DATA
#ifndef PARAMETER_SIZE_IS_EVEN
// NOTE(review): padding presumably keeps the struct an even number of reals
// (likely to control shared-memory layout) -- confirm against the host code
// that defines PARAMETER_SIZE_IS_EVEN.
real padding;
#endif
} AtomData;
#endif
#ifdef ENABLE_SHUFFLE
// support for 64 bit shuffles: a 64-bit value is split into its two 32-bit
// halves, each half is shuffled through the SHFL macro, then recombined.
static __inline__ __device__ float real_shfl(float var, int srcLane) {
// 32-bit values can be shuffled directly.
return SHFL(var, srcLane);
}
static __inline__ __device__ double real_shfl(double var, int srcLane) {
int hi, lo;
// Unpack the double into high/low 32-bit words via PTX.
asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
hi = SHFL(hi, srcLane);
lo = SHFL(lo, srcLane);
return __hiloint2double( hi, lo );
}
static __inline__ __device__ long long real_shfl(long long var, int srcLane) {
int hi, lo;
// Unpack the 64-bit integer into high/low 32-bit words via PTX.
asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
hi = SHFL(hi, srcLane);
lo = SHFL(lo, srcLane);
// unfortunately there isn't an __nv_hiloint2long(hi,lo) intrinsic cast
// NOTE(review): the reinterpret_cast below violates strict aliasing in
// standard C++; nvcc compiles it as intended, but a memcpy would be safer.
int2 fuse; fuse.x = lo; fuse.y = hi;
return *reinterpret_cast<long long*>(&fuse);
}
#endif
/**
* Compute nonbonded interactions. The kernel is separated into two parts,
* tiles with exclusions and tiles without exclusions. It relies heavily on
* implicit warp-level synchronization. A tile is defined by two atom blocks
* each of warpsize. Each warp computes a range of tiles.
*
* Tiles with exclusions compute the entire set of interactions across
* atom blocks, equal to warpsize*warpsize. In order to avoid access conflicts
* the forces are computed and accumulated diagonally in the manner shown below
* where, suppose
*
* [a-h] comprise atom block 1, [i-p] comprise atom block 2
*
* 1 denotes the first set of calculations within the warp
* 2 denotes the second set of calculations within the warp
* ... etc.
*
* threads
* 0 1 2 3 4 5 6 7
* atom1
* L a b c d e f g h
* o i 1 2 3 4 5 6 7 8
* c j 8 1 2 3 4 5 6 7
* a k 7 8 1 2 3 4 5 6
* l l 6 7 8 1 2 3 4 5
* D m 5 6 7 8 1 2 3 4
* a n 4 5 6 7 8 1 2 3
* t o 3 4 5 6 7 8 1 2
* a p 2 3 4 5 6 7 8 1
*
* Tiles without exclusions read off directly from the neighbourlist interactingAtoms
* and follow the same force accumulation method. If there are more interactingTiles
* than the size of the neighbourlist initially allocated, the neighbourlist is rebuilt
* and the full tileset is computed. This should happen on the first step, and very rarely
* afterwards.
*
* On CUDA devices that support the shuffle intrinsic, on diagonal exclusion tiles use
* __shfl to broadcast. For all other types of tiles __shfl is used to pass around the
* forces, positions, and parameters when computing the forces.
*
* [out]forceBuffers - forces on each atom to eventually be accumulated
* [out]energyBuffer - energyBuffer to eventually be accumulated
* [in]posq - x,y,z,charge
* [in]exclusions - 1024-bit flags denoting atom-atom exclusions for each tile
* [in]exclusionTiles - x,y denotes the indices of tiles that have an exclusion
* [in]startTileIndex - index into first tile to be processed
* [in]numTileIndices - number of tiles this context is responsible for processing
* [in]tiles - the atom block for each tile
* [in]interactionCount - total number of tiles that have an interaction
* [in]maxTiles - stores the size of the neighbourlist in case it needs
* - to be expanded
* [in]periodicBoxSize - size of the Periodic Box, last dimension (w) not used
* [in]invPeriodicBox - inverse of the periodicBoxSize, pre-computed for speed
* [in]blockCenter - the center of each block in euclidean coordinates
* [in]blockSize - size of the each block, radiating from the center
* - x is half the distance of total length
* - y is half the distance of total width
* - z is half the distance of total height
* - w is not used
* [in]interactingAtoms - a list of interactions within a given tile
*
*/
extern "C" __global__ void computeNonbonded(
unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, const tileflags* __restrict__ exclusions,
const ushort2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned int numTileIndices
#ifdef USE_CUTOFF
, const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms, unsigned int maxSinglePairs,
const int2* __restrict__ singlePairs
#endif
PARAMETER_ARGUMENTS) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp
const unsigned int tbx = threadIdx.x - tgx; // block warpIndex
mixed energy = 0;
INIT_DERIVATIVES
// used shared memory if the device cannot shuffle
#ifndef ENABLE_SHUFFLE
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
#endif
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real3 force = make_real3(0);
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 posq1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
#ifdef USE_EXCLUSIONS
tileflags excl = exclusions[pos*TILE_SIZE+tgx];
#endif
const bool hasExclusions = true;
if (x == y) {
// This tile is on the diagonal.
#ifdef ENABLE_SHUFFLE
real4 shflPosq = posq1;
#else
localData[threadIdx.x].x = posq1.x;
localData[threadIdx.x].y = posq1.y;
localData[threadIdx.x].z = posq1.z;
localData[threadIdx.x].q = posq1.w;
LOAD_LOCAL_PARAMETERS_FROM_1
#endif
// we do not need to fetch parameters from global since this is a symmetric tile
// instead we can broadcast the values using shuffle
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+j;
real4 posq2;
#ifdef ENABLE_SHUFFLE
BROADCAST_WARP_DATA
#else
posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+j;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1));
#endif
real tempEnergy = 0.0f;
const real interactionScale = 0.5f;
COMPUTE_INTERACTION
energy += 0.5f*tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
force.x -= delta.x*dEdR;
force.y -= delta.y*dEdR;
force.z -= delta.z*dEdR;
#else
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#endif
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
}
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
real4 shflPosq = posq[j];
#ifdef ENABLE_SHUFFLE
real3 shflForce;
shflForce.x = 0.0f;
shflForce.y = 0.0f;
shflForce.z = 0.0f;
#else
localData[threadIdx.x].x = shflPosq.x;
localData[threadIdx.x].y = shflPosq.y;
localData[threadIdx.x].z = shflPosq.z;
localData[threadIdx.x].q = shflPosq.w;
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
#endif
DECLARE_LOCAL_PARAMETERS
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
#ifdef USE_EXCLUSIONS
excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx));
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+tj;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1));
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
// cycles the indices
// 0 1 2 3 4 5 6 7 -> 1 2 3 4 5 6 7 0
tj = (tj + 1) & (TILE_SIZE - 1);
}
const unsigned int offset = y*TILE_SIZE + tgx;
// write results for off diagonal tiles
#ifdef INCLUDE_FORCES
#ifdef ENABLE_SHUFFLE
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (shflForce.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.z*0x100000000)));
#else
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
#endif
#endif
}
// Write results for on and off diagonal tiles
#ifdef INCLUDE_FORCES
const unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
#endif
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
const unsigned int numTiles = interactionCount[0];
if (numTiles > maxTiles)
return; // There wasn't enough memory for the neighbor list.
int pos = (int) (numTiles > maxTiles ? startTileIndex+warp*(long long)numTileIndices/totalWarps : warp*(long long)numTiles/totalWarps);
int end = (int) (numTiles > maxTiles ? startTileIndex+(warp+1)*(long long)numTileIndices/totalWarps : (warp+1)*(long long)numTiles/totalWarps);
#else
const unsigned int numTiles = numTileIndices;
int pos = (int) (startTileIndex+warp*(long long)numTiles/totalWarps);
int end = (int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
// atomIndices can probably be shuffled as well
// but it probably wouldn't make things any faster
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
const bool hasExclusions = false;
real3 force = make_real3(0);
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
x = tiles[pos];
real4 blockSizeX = blockSize[x];
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= MAX_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= MAX_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= MAX_CUTOFF);
#else
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
#endif
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 posq1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
//const unsigned int localAtomIndex = threadIdx.x;
#ifdef USE_CUTOFF
unsigned int j = interactingAtoms[pos*TILE_SIZE+tgx];
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
#ifdef ENABLE_SHUFFLE
DECLARE_LOCAL_PARAMETERS
real4 shflPosq;
real3 shflForce;
shflForce.x = 0.0f;
shflForce.y = 0.0f;
shflForce.z = 0.0f;
#endif
if (j < PADDED_NUM_ATOMS) {
// Load position of atom j from from global memory
#ifdef ENABLE_SHUFFLE
shflPosq = posq[j];
#else
localData[threadIdx.x].x = posq[j].x;
localData[threadIdx.x].y = posq[j].y;
localData[threadIdx.x].z = posq[j].z;
localData[threadIdx.x].q = posq[j].w;
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
#endif
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
}
else {
#ifdef ENABLE_SHUFFLE
shflPosq = make_real4(0, 0, 0, 0);
#else
localData[threadIdx.x].x = 0;
localData[threadIdx.x].y = 0;
localData[threadIdx.x].z = 0;
#endif
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX)
#ifdef ENABLE_SHUFFLE
APPLY_PERIODIC_TO_POS_WITH_CENTER(shflPosq, blockCenterX)
#else
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x], blockCenterX)
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS);
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS);
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
#ifdef INCLUDE_FORCES
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS) {
#ifdef ENABLE_SHUFFLE
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (shflForce.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.z*0x100000000)));
#else
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
#endif
}
#endif
}
pos++;
}
// Third loop: single pairs that aren't part of a tile.
#if USE_CUTOFF
const unsigned int numPairs = interactionCount[1];
if (numPairs > maxSinglePairs)
return; // There wasn't enough memory for the neighbor list.
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numPairs; i += blockDim.x*gridDim.x) {
int2 pair = singlePairs[i];
int atom1 = pair.x;
int atom2 = pair.y;
real4 posq1 = posq[atom1];
real4 posq2 = posq[atom2];
LOAD_ATOM1_PARAMETERS
int j = atom2;
atom2 = threadIdx.x;
DECLARE_LOCAL_PARAMETERS
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
LOAD_ATOM2_PARAMETERS
atom2 = pair.y;
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
bool hasExclusions = false;
bool isExcluded = false;
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
real3 dEdR1 = delta*dEdR;
real3 dEdR2 = -dEdR1;
#endif
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (-dEdR1.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR1.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR1.z*0x100000000)));
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (-dEdR2.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR2.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR2.z*0x100000000)));
#endif
}
#endif
#ifdef INCLUDE_ENERGY
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
#endif
SAVE_DERIVATIVES
} |
6523e49139355132cc0eb638ce06c374d61e7309.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
//#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
//#else
// #define BLOCK_SIZE 768
//#endif
__global__ void magma_strmv_tkernel(float *T, int ldt, float *v,
float *y);
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void zsum_reduce( /*int n,*/ int i, float* x )
{
    // Block-wide tree reduction: each step folds the upper half of the
    // active range into the lower half, leaving the total in x[0].
    // n = number of participating threads (compile-time, so inactive
    // steps are removed by the compiler); i = this thread's index.
    // x must be in shared memory; its contents are destroyed.
    __syncthreads();
    if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i] += x[i+  64]; } __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i] += x[i+  32]; } __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    // NOTE(review): the barriers are kept anyway, which also keeps this
    // correct on architectures with independent thread scheduling.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i] += x[i+  16]; } __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i] += x[i+   8]; } __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i] += x[i+   4]; } __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i] += x[i+   2]; } __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i] += x[i+   1]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
__global__ void
magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv,
                    const float * __restrict__ c,
                    float *dwork)
{
    // One thread block per column of V:
    //   dwork[blockIdx.x] = V(:,blockIdx.x)' * c
    // Each thread accumulates a strided partial dot product, then the
    // block reduces the partials in shared memory.
    // Launch requirement: blockDim.x == BLOCK_SIZE.
    const int i = threadIdx.x;
    const float *dV = V + (blockIdx.x) * ldv;   // this block's column
    __shared__ float sum[ BLOCK_SIZE ];
    float lsum;

    /*  lsum := v' * C  */
    lsum = MAGMA_S_ZERO;
    for( int j = i; j < m; j += BLOCK_SIZE )
        lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dV[j] ), c[j] );

    sum[i] = lsum;
    zsum_reduce< BLOCK_SIZE >( i, sum );

    __syncthreads();
    if (i==0)
        dwork [blockIdx.x] = sum[0];   // total for this column
}
//==============================================================================
__global__ void
magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c,
                    float *dwork, float *tau)
{
    // Like magma_sgemv_kernel1, but additionally forces c[0] = 1 (the
    // implicit unit first element of an elementary reflector) and scales
    // the result by -tau[0]:
    //   dwork[blockIdx.x] = -tau[0] * V(:,blockIdx.x)' * c
    // Only thread 0 reads c[0] in the loop (j starts at i), so the
    // unsynchronized write below is race-free within the block.
    const int i = threadIdx.x;
    const float *dV = V + (blockIdx.x) * ldv;
    __shared__ float sum[ BLOCK_SIZE ];
    float lsum;

    if (i==0)
        c[0] = MAGMA_S_ONE;

    /*  lsum := v' * C  */
    lsum = MAGMA_S_ZERO;
    for( int j = i; j < m; j += BLOCK_SIZE )
        lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dV[j] ), c[j] );

    sum[i] = lsum;
    zsum_reduce< BLOCK_SIZE >( i, sum );

    __syncthreads();
    if (i==0)
        dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
__global__ void
magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv,
                    const float * __restrict__ x, float *c)
{
    // One thread per row: c[row] -= V(row,:) * x, i.e. c := c - V*x.
    // V is m-by-n with leading dimension ldv (column-major).
    const int row = threadIdx.x + BLOCK_SIZE * blockIdx.x;
    if (row >= m)
        return;                          // guard the ragged last block

    const float *Vrow = V + row;         // consecutive columns are ldv apart
    float acc = MAGMA_S_ZERO;
    for (int col = 0; col < n; ++col)
        acc += MAGMA_S_MUL( Vrow[col * ldv], x[col] );

    c[row] -= acc;
}
//==============================================================================
/*
Apply a real block reflector H to a real vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V'
where T is the real k-by-k upper triangular matrix in the
representation of the block reflector, and V is a real block of
k elementary reflectors.
*/
extern "C" void
magma_slarfbx_gpu(magma_int_t m, magma_int_t k, float *V, magma_int_t ldv,
                  float *T, magma_int_t ldt, float *c,
                  float *dwork)
{
    // Applies the block reflector H = I - V T V' to vector c (c := H c)
    // in three kernel launches on magma_stream. dwork must hold at least
    // 2*k floats: dwork[0..k) and dwork[k..2k) are the two temporaries.
    /* dwork = V' c                   */
    hipLaunchKernelGGL(( magma_sgemv_kernel1), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m, V, ldv, c, dwork);

    /* dwork = T' dwork               */
    hipLaunchKernelGGL(( magma_strmv_tkernel), dim3(k), dim3(k), 0, magma_stream , T, ldt, dwork, dwork+k);

    /* c = c - V dwork                */
    dim3  blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE );  // ceil(m / BLOCK_SIZE)
    dim3 threads3( BLOCK_SIZE );
    hipLaunchKernelGGL(( magma_sgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , m, k, V, ldv, dwork+k, c);
}
//==============================================================================
| 6523e49139355132cc0eb638ce06c374d61e7309.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
//#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
//#else
// #define BLOCK_SIZE 768
//#endif
__global__ void magma_strmv_tkernel(float *T, int ldt, float *v,
float *y);
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void zsum_reduce( /*int n,*/ int i, float* x )
{
    // Block-wide tree reduction: each step folds the upper half of the
    // active range into the lower half, leaving the total in x[0].
    // n = number of participating threads (compile-time, so inactive
    // steps are removed by the compiler); i = this thread's index.
    // x must be in shared memory; its contents are destroyed.
    __syncthreads();
    if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i] += x[i+  64]; } __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i] += x[i+  32]; } __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    // NOTE(review): the barriers are kept anyway, which also keeps this
    // correct on architectures with independent thread scheduling.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i] += x[i+  16]; } __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i] += x[i+   8]; } __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i] += x[i+   4]; } __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i] += x[i+   2]; } __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i] += x[i+   1]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
__global__ void
magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv,
                    const float * __restrict__ c,
                    float *dwork)
{
    // One thread block per column of V:
    //   dwork[blockIdx.x] = V(:,blockIdx.x)' * c
    // Each thread accumulates a strided partial dot product, then the
    // block reduces the partials in shared memory.
    // Launch requirement: blockDim.x == BLOCK_SIZE.
    const int i = threadIdx.x;
    const float *dV = V + (blockIdx.x) * ldv;   // this block's column
    __shared__ float sum[ BLOCK_SIZE ];
    float lsum;

    /*  lsum := v' * C  */
    lsum = MAGMA_S_ZERO;
    for( int j = i; j < m; j += BLOCK_SIZE )
        lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dV[j] ), c[j] );

    sum[i] = lsum;
    zsum_reduce< BLOCK_SIZE >( i, sum );

    __syncthreads();
    if (i==0)
        dwork [blockIdx.x] = sum[0];   // total for this column
}
//==============================================================================
__global__ void
magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c,
                    float *dwork, float *tau)
{
    // Like magma_sgemv_kernel1, but additionally forces c[0] = 1 (the
    // implicit unit first element of an elementary reflector) and scales
    // the result by -tau[0]:
    //   dwork[blockIdx.x] = -tau[0] * V(:,blockIdx.x)' * c
    // Only thread 0 reads c[0] in the loop (j starts at i), so the
    // unsynchronized write below is race-free within the block.
    const int i = threadIdx.x;
    const float *dV = V + (blockIdx.x) * ldv;
    __shared__ float sum[ BLOCK_SIZE ];
    float lsum;

    if (i==0)
        c[0] = MAGMA_S_ONE;

    /*  lsum := v' * C  */
    lsum = MAGMA_S_ZERO;
    for( int j = i; j < m; j += BLOCK_SIZE )
        lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dV[j] ), c[j] );

    sum[i] = lsum;
    zsum_reduce< BLOCK_SIZE >( i, sum );

    __syncthreads();
    if (i==0)
        dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
__global__ void
magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv,
                    const float * __restrict__ x, float *c)
{
    // One thread per row: c[row] -= V(row,:) * x, i.e. c := c - V*x.
    // V is m-by-n with leading dimension ldv (column-major).
    const int row = threadIdx.x + BLOCK_SIZE * blockIdx.x;
    if (row >= m)
        return;                          // guard the ragged last block

    const float *Vrow = V + row;         // consecutive columns are ldv apart
    float acc = MAGMA_S_ZERO;
    for (int col = 0; col < n; ++col)
        acc += MAGMA_S_MUL( Vrow[col * ldv], x[col] );

    c[row] -= acc;
}
/*
Apply a real block reflector H to a real vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V'
where T is the real k-by-k upper triangular matrix in the
representation of the block reflector, and V is a real block of
k elementary reflectors.
*/
extern "C" void
magma_slarfbx_gpu(magma_int_t m, magma_int_t k, float *V, magma_int_t ldv,
                  float *T, magma_int_t ldt, float *c,
                  float *dwork)
{
    // Applies the block reflector H = I - V T V' to vector c (c := H c)
    // in three kernel launches on magma_stream. dwork must hold at least
    // 2*k floats: dwork[0..k) and dwork[k..2k) are the two temporaries.
    /* dwork = V' c                   */
    magma_sgemv_kernel1<<< k, BLOCK_SIZE, 0, magma_stream >>>(m, V, ldv, c, dwork);

    /* dwork = T' dwork               */
    magma_strmv_tkernel<<< k, k, 0, magma_stream >>>( T, ldt, dwork, dwork+k);

    /* c = c - V dwork                */
    dim3  blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE );  // ceil(m / BLOCK_SIZE)
    dim3 threads3( BLOCK_SIZE );
    magma_sgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>>( m, k, V, ldv, dwork+k, c);
}
//==============================================================================
|
c9b921126b895aabb74f1eb56a3edff4b706be8a.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver119 on 30.11.17.
//
#include <execution/LaunchContext.h>
#include <logger.h>
#include <exceptions/cuda_exception.h>
#include <helpers/cublasHelper.h>
#include <thread>
#include <execution/AffinityManager.h>
// Per-host-thread buffer holder: the streams and scratch buffers returned by
// the accessors below live here, not in the LaunchContext object itself.
thread_local nd4j::ContextBuffers contextBuffers = nd4j::ContextBuffers();

namespace nd4j {

// Lazily built, one shared LaunchContext per device; _mutex guards the build.
std::vector<std::shared_ptr<LaunchContext>> LaunchContext::_contexts = std::vector<std::shared_ptr<LaunchContext>>();
std::mutex LaunchContext::_mutex;

////////////////////////////////////////////////////////////////////////
// Legacy constructor: the stream/pointer arguments are ignored now that the
// per-thread contextBuffers owns these resources (see the commented code).
LaunchContext::LaunchContext(hipStream_t *cudaStream, hipStream_t& specialCudaStream, void* reductionPointer, void* scalarPointer, int* allocationPointer) {
	//_cudaStream = cudaStream;
	//_cudaSpecialStream = &specialCudaStream; // ideal is = new hipStream_t; *_cudaSpecialStream = specialCudaStream;
	//_reductionPointer = reductionPointer;
	//_scalarPointer = scalarPointer;
	//_allocationPointer = allocationPointer;
	_workspace = nullptr;
	_isAllocated = false;
}

// Nothing is owned directly anymore; buffers are released via contextBuffers.
LaunchContext::~LaunchContext() {
	if (_isAllocated) {

	}
}

////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext() {
	// default constructor, just to make clang/ranlib happy
	_workspace = nullptr;
	_deviceID = 0;

	_isAllocated = true;
}

// Legacy constructor: arguments are ignored (see commented assignments).
LaunchContext::LaunchContext(Nd4jPointer cudaStream, Nd4jPointer reductionPointer, Nd4jPointer scalarPointer, Nd4jPointer allocationPointer) {
	_isAllocated = false;
	//_cudaStream = reinterpret_cast<hipStream_t*>(cudaStream);
	// _cudaSpecialStream = reinterpret_cast<hipStream_t*>(cudaStream);
	//_reductionPointer = reductionPointer;
	//_scalarPointer = scalarPointer;
	//_allocationPointer = reinterpret_cast<int *>(allocationPointer);
}

LaunchContext* LaunchContext::defaultContext() {
	/**
	 * This method returns LaunchContext, that has multiple entities within:
	 * 1) temporary buffers. they must be per-thread
	 * 2) CUDA stream. it must be either per-thread or per-device
	 * 3) cuBLAS handle. it must be per-device
	 */
	auto deviceId = AffinityManager::currentDeviceId();

	// we need this block synchronous, to avoid double initialization etc
	_mutex.lock();
	if (LaunchContext::_contexts.empty()) {
		// create one context per device
		auto numDevices = AffinityManager::numberOfDevices();

		_contexts.resize(numDevices);
		for (int e = 0; e < numDevices; e++) {
			AffinityManager::setCurrentNativeDevice(e);

			LaunchContext::_contexts[e] = std::make_shared<LaunchContext>();
		}

		// don't forget to restore device back again
		AffinityManager::setCurrentNativeDevice(deviceId);
	}
	_mutex.unlock();

	// return context for current device
	return LaunchContext::_contexts[deviceId].get();
}

// ---- accessors: all delegate to the per-thread contextBuffers ----

void* LaunchContext::getReductionPointer () const {
	return contextBuffers.reductionBuffer();
};

void* LaunchContext::getScalarPointer() const {
	return contextBuffers.scalarBuffer();
};

int* LaunchContext::getAllocationPointer() const {
	return reinterpret_cast<int*>(contextBuffers.allocationBuffer());
};

// cuBLAS/cuSOLVER handles are per-device, managed by CublasHelper.
void* LaunchContext::getCublasHandle() const {
	return CublasHelper::getInstance()->handle();
};

void* LaunchContext::getCusolverHandle() const {
	return CublasHelper::getInstance()->solver();
};

hipStream_t* LaunchContext::getCudaStream() const {
	return reinterpret_cast<hipStream_t*>(contextBuffers.execStream());
};

// NOTE(review): trailing ';;' below is harmless.
hipStream_t* LaunchContext::getCudaSpecialStream() const {
	return reinterpret_cast<hipStream_t*>(contextBuffers.specialStream());;
};

// ---- mutators: forward to contextBuffers (stream setters are no-ops) ----

void LaunchContext::setReductionPointer (void* reductionPointer) {
	contextBuffers.setReductionBuffer(reductionPointer);
};

void LaunchContext::setScalarPointer(void* scalarPointer) {
	contextBuffers.setScalarBuffer(scalarPointer);
};

void LaunchContext::setAllocationPointer(int* allocationPointer) {
	contextBuffers.setAllocationBuffer(allocationPointer);
};

void LaunchContext::setCudaStream(hipStream_t* cudaStream) {
	//_cudaStream = cudaStream;
};

void LaunchContext::setCudaSpecialStream(hipStream_t* cudaStream) {
	//_cudaSpecialStream = cudaStream;
};

void LaunchContext::setCublasHandle(void *handle) {
	_cublasHandle = handle;
};

// Replaces this thread's buffers wholesale (used when migrating work).
void LaunchContext::swapContextBuffers(ContextBuffers &buffers) {
	contextBuffers = buffers;
};

void LaunchContext::releaseBuffers() {
	//nd4j_printf("LaunchContext::releaseBuffers() was invoked\n", "");
	contextBuffers.release();
}

bool LaunchContext::isInitialized() {
	return contextBuffers.isInitialized();
}

sd::ErrorReference* LaunchContext::errorReference() {
	return contextBuffers.errorReference();
}
}
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver119 on 30.11.17.
//
#include <execution/LaunchContext.h>
#include <logger.h>
#include <exceptions/cuda_exception.h>
#include <helpers/cublasHelper.h>
#include <thread>
#include <execution/AffinityManager.h>
// Per-host-thread buffer holder: the streams and scratch buffers returned by
// the accessors below live here, not in the LaunchContext object itself.
thread_local nd4j::ContextBuffers contextBuffers = nd4j::ContextBuffers();

namespace nd4j {

// Lazily built, one shared LaunchContext per device; _mutex guards the build.
std::vector<std::shared_ptr<LaunchContext>> LaunchContext::_contexts = std::vector<std::shared_ptr<LaunchContext>>();
std::mutex LaunchContext::_mutex;

////////////////////////////////////////////////////////////////////////
// Legacy constructor: the stream/pointer arguments are ignored now that the
// per-thread contextBuffers owns these resources (see the commented code).
LaunchContext::LaunchContext(cudaStream_t *cudaStream, cudaStream_t& specialCudaStream, void* reductionPointer, void* scalarPointer, int* allocationPointer) {
	//_cudaStream = cudaStream;
	//_cudaSpecialStream = &specialCudaStream; // ideal is = new cudaStream_t; *_cudaSpecialStream = specialCudaStream;
	//_reductionPointer = reductionPointer;
	//_scalarPointer = scalarPointer;
	//_allocationPointer = allocationPointer;
	_workspace = nullptr;
	_isAllocated = false;
}

// Nothing is owned directly anymore; buffers are released via contextBuffers.
LaunchContext::~LaunchContext() {
	if (_isAllocated) {

	}
}

////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext() {
	// default constructor, just to make clang/ranlib happy
	_workspace = nullptr;
	_deviceID = 0;

	_isAllocated = true;
}

// Legacy constructor: arguments are ignored (see commented assignments).
LaunchContext::LaunchContext(Nd4jPointer cudaStream, Nd4jPointer reductionPointer, Nd4jPointer scalarPointer, Nd4jPointer allocationPointer) {
	_isAllocated = false;
	//_cudaStream = reinterpret_cast<cudaStream_t*>(cudaStream);
	// _cudaSpecialStream = reinterpret_cast<cudaStream_t*>(cudaStream);
	//_reductionPointer = reductionPointer;
	//_scalarPointer = scalarPointer;
	//_allocationPointer = reinterpret_cast<int *>(allocationPointer);
}

LaunchContext* LaunchContext::defaultContext() {
	/**
	 * This method returns LaunchContext, that has multiple entities within:
	 * 1) temporary buffers. they must be per-thread
	 * 2) CUDA stream. it must be either per-thread or per-device
	 * 3) cuBLAS handle. it must be per-device
	 */
	auto deviceId = AffinityManager::currentDeviceId();

	// we need this block synchronous, to avoid double initialization etc
	_mutex.lock();
	if (LaunchContext::_contexts.empty()) {
		// create one context per device
		auto numDevices = AffinityManager::numberOfDevices();

		_contexts.resize(numDevices);
		for (int e = 0; e < numDevices; e++) {
			AffinityManager::setCurrentNativeDevice(e);

			LaunchContext::_contexts[e] = std::make_shared<LaunchContext>();
		}

		// don't forget to restore device back again
		AffinityManager::setCurrentNativeDevice(deviceId);
	}
	_mutex.unlock();

	// return context for current device
	return LaunchContext::_contexts[deviceId].get();
}

// ---- accessors: all delegate to the per-thread contextBuffers ----

void* LaunchContext::getReductionPointer () const {
	return contextBuffers.reductionBuffer();
};

void* LaunchContext::getScalarPointer() const {
	return contextBuffers.scalarBuffer();
};

int* LaunchContext::getAllocationPointer() const {
	return reinterpret_cast<int*>(contextBuffers.allocationBuffer());
};

// cuBLAS/cuSOLVER handles are per-device, managed by CublasHelper.
void* LaunchContext::getCublasHandle() const {
	return CublasHelper::getInstance()->handle();
};

void* LaunchContext::getCusolverHandle() const {
	return CublasHelper::getInstance()->solver();
};

cudaStream_t* LaunchContext::getCudaStream() const {
	return reinterpret_cast<cudaStream_t*>(contextBuffers.execStream());
};

// NOTE(review): trailing ';;' below is harmless.
cudaStream_t* LaunchContext::getCudaSpecialStream() const {
	return reinterpret_cast<cudaStream_t*>(contextBuffers.specialStream());;
};

// ---- mutators: forward to contextBuffers (stream setters are no-ops) ----

void LaunchContext::setReductionPointer (void* reductionPointer) {
	contextBuffers.setReductionBuffer(reductionPointer);
};

void LaunchContext::setScalarPointer(void* scalarPointer) {
	contextBuffers.setScalarBuffer(scalarPointer);
};

void LaunchContext::setAllocationPointer(int* allocationPointer) {
	contextBuffers.setAllocationBuffer(allocationPointer);
};

void LaunchContext::setCudaStream(cudaStream_t* cudaStream) {
	//_cudaStream = cudaStream;
};

void LaunchContext::setCudaSpecialStream(cudaStream_t* cudaStream) {
	//_cudaSpecialStream = cudaStream;
};

void LaunchContext::setCublasHandle(void *handle) {
	_cublasHandle = handle;
};

// Replaces this thread's buffers wholesale (used when migrating work).
void LaunchContext::swapContextBuffers(ContextBuffers &buffers) {
	contextBuffers = buffers;
};

void LaunchContext::releaseBuffers() {
	//nd4j_printf("LaunchContext::releaseBuffers() was invoked\n", "");
	contextBuffers.release();
}

bool LaunchContext::isInitialized() {
	return contextBuffers.isInitialized();
}

sd::ErrorReference* LaunchContext::errorReference() {
	return contextBuffers.errorReference();
}
}
86b18b47efe6eaeebef5527e0a6f942a4eeca3a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//////////////////////////////////////////////////////////////////////////
// < A CUDA/OpenCL General Sparse Matrix-Matrix Multiplication Program >
//
// < See paper:
// Weifeng Liu and Brian Vinter, "An Efficient GPU General Sparse
// Matrix-Matrix Multiplication for Irregular Data," Parallel and
// Distributed Processing Symposium, 2014 IEEE 28th International
// (IPDPS '14), pp.370-381, 19-23 May 2014
// for details. >
//////////////////////////////////////////////////////////////////////////
#include "bhsparse_cuda.h"
bhsparse_cuda::bhsparse_cuda()
{
    // Zero every device pointer so that free_mem() is safe to call even if
    // initData()/create_Ct() never ran (previously these members were left
    // uninitialized, making hipFree on them undefined behavior).
    _d_csrValA    = NULL;  _d_csrRowPtrA  = NULL;  _d_csrColIndA  = NULL;
    _d_csrValB    = NULL;  _d_csrRowPtrB  = NULL;  _d_csrColIndB  = NULL;
    _d_csrValC    = NULL;  _d_csrRowPtrC  = NULL;  _d_csrColIndC  = NULL;
    _d_csrValCt   = NULL;  _d_csrRowPtrCt = NULL;  _d_csrColIndCt = NULL;
    _d_queue_one  = NULL;
    _profiling = false;
}
int bhsparse_cuda::initPlatform()
{
    // Select device 0 and cache its multiprocessor geometry.
    // Returns 0 on success; API failures are trapped by checkCudaErrors,
    // consistent with the rest of this class (previously the return codes
    // of hipSetDevice/hipGetDeviceProperties were silently ignored, so a
    // missing device left deviceProp uninitialized).
    _profiling = false;
    int device_id = 0;
    checkCudaErrors(hipSetDevice(device_id));
    hipDeviceProp_t deviceProp;
    checkCudaErrors(hipGetDeviceProperties(&deviceProp, device_id));

    _num_smxs = deviceProp.multiProcessorCount;
    // occupancy budget expressed in warps per SMX
    _max_blocks_per_smx = deviceProp.maxThreadsPerMultiProcessor / WARPSIZE_NV;

    cout << "Device [" <<  device_id << "] " << deviceProp.name
         << " @ " << deviceProp.clockRate * 1e-3f << "MHz. "
         << _num_smxs << " SMXs." << endl;

    return 0;
}
int bhsparse_cuda::freePlatform()
{
    // Nothing to tear down for the CUDA/HIP backend; always report success.
    return 0;
}
int bhsparse_cuda::free_mem()
{
    // Release all device-side matrix storage. Pointers are reset to NULL
    // after each free so a second call (or a later destructor path) does
    // not double-free. hipFree(NULL) is a no-op, so this is idempotent.
    int err = 0;

    // A
    hipFree(_d_csrValA);     _d_csrValA    = NULL;
    hipFree(_d_csrRowPtrA);  _d_csrRowPtrA = NULL;
    hipFree(_d_csrColIndA);  _d_csrColIndA = NULL;

    // B
    hipFree(_d_csrValB);     _d_csrValB    = NULL;
    hipFree(_d_csrRowPtrB);  _d_csrRowPtrB = NULL;
    hipFree(_d_csrColIndB);  _d_csrColIndB = NULL;

    // C
    hipFree(_d_csrValC);     _d_csrValC    = NULL;
    hipFree(_d_csrRowPtrC);  _d_csrRowPtrC = NULL;
    hipFree(_d_csrColIndC);  _d_csrColIndC = NULL;

    // Ct
    hipFree(_d_csrValCt);     _d_csrValCt    = NULL;
    hipFree(_d_csrRowPtrCt);  _d_csrRowPtrCt = NULL;
    hipFree(_d_csrColIndCt);  _d_csrColIndCt = NULL;

    // QUEUE_ONEs
    hipFree(_d_queue_one);    _d_queue_one   = NULL;

    return err;
}
// Uploads the input matrices A (m x k, nnzA) and B (k x n, nnzB) in CSR form
// to the device and allocates zeroed device storage for the row pointers of
// C and of the intermediate matrix Ct, plus the per-row statistics queue.
// The csrRowPtrC/csrRowPtrCt/queue_one arguments are HOST buffers that this
// object keeps pointers to for later device<->host copies.
// Returns 0; allocation/copy failures are trapped by checkCudaErrors.
int bhsparse_cuda::initData(int m, int k, int n,
                            int nnzA, value_type *csrValA, index_type *csrRowPtrA, index_type *csrColIndA,
                            int nnzB, value_type *csrValB, index_type *csrRowPtrB, index_type *csrColIndB,
                            index_type *csrRowPtrC, index_type *csrRowPtrCt, index_type *queue_one)
{
    int err = 0;

    _m = m;
    _k = k;
    _n = n;

    _nnzA = nnzA;
    _nnzB = nnzB;
    _nnzC = 0;
    _nnzCt = 0;

    // malloc mem space and copy data from host to device

    // Matrix A
    checkCudaErrors(hipMalloc((void **)&_d_csrColIndA, _nnzA  * sizeof(index_type)));
    checkCudaErrors(hipMalloc((void **)&_d_csrRowPtrA, (_m+1) * sizeof(index_type)));
    checkCudaErrors(hipMalloc((void **)&_d_csrValA,    _nnzA  * sizeof(value_type)));

    checkCudaErrors(hipMemcpy(_d_csrColIndA, csrColIndA, _nnzA  * sizeof(index_type),   hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(_d_csrRowPtrA, csrRowPtrA, (_m+1) * sizeof(index_type),   hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(_d_csrValA,    csrValA,    _nnzA  * sizeof(value_type),   hipMemcpyHostToDevice));

    // Matrix B
    checkCudaErrors(hipMalloc((void **)&_d_csrColIndB, _nnzB  * sizeof(index_type)));
    checkCudaErrors(hipMalloc((void **)&_d_csrRowPtrB, (_k+1) * sizeof(index_type)));
    checkCudaErrors(hipMalloc((void **)&_d_csrValB,    _nnzB  * sizeof(value_type)));

    checkCudaErrors(hipMemcpy(_d_csrColIndB, csrColIndB, _nnzB  * sizeof(index_type),   hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(_d_csrRowPtrB, csrRowPtrB, (_k+1) * sizeof(index_type),   hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(_d_csrValB,    csrValB,    _nnzB  * sizeof(value_type),   hipMemcpyHostToDevice));

    // Matrix C - only the row-pointer array is known-size at this point;
    // values/column indices are allocated later once nnzC is known.
    _h_csrRowPtrC = csrRowPtrC;
    checkCudaErrors(hipMalloc((void **)&_d_csrRowPtrC, (_m+1) * sizeof(index_type)));
    checkCudaErrors(hipMemset(_d_csrRowPtrC, 0, (_m+1) * sizeof(index_type)));

    // Matrix Ct - same deal; see create_Ct() for the value/index arrays.
    _h_csrRowPtrCt = csrRowPtrCt;
    checkCudaErrors(hipMalloc((void **)&_d_csrRowPtrCt, (_m+1) * sizeof(index_type)));
    checkCudaErrors(hipMemset(_d_csrRowPtrCt, 0, (_m+1) * sizeof(index_type)));

    // statistics - queue_one (TUPLE_QUEUE entries per row)
    _h_queue_one = queue_one;
    checkCudaErrors(hipMalloc((void **)&_d_queue_one, TUPLE_QUEUE * _m * sizeof(index_type)));
    checkCudaErrors(hipMemset(_d_queue_one, 0, TUPLE_QUEUE * _m * sizeof(index_type)));

    return err;
}
// Enables/disables timing instrumentation for subsequent operations.
void bhsparse_cuda::setProfiling(bool profiling)
{
    _profiling = profiling;
}
// One thread per row of A: computes an UPPER BOUND on the nonzeros of the
// corresponding row of C = A*B, by summing the lengths of every row of B
// referenced by that row of A (duplicate column indices are not merged
// here - that happens later). The per-row counts are written to
// d_csrRowPtrCt[0..m); thread 0 additionally zeroes the sentinel slot at
// index m (presumably for a later exclusive prefix sum on the host -
// see compute_nnzCt()).
__global__ void
compute_nnzCt_cudakernel(const int* d_csrRowPtrA,
                         const int* __restrict__ d_csrColIndA,
                         const int* d_csrRowPtrB,
                         int *d_csrRowPtrCt,
                         const int m)
{
    int global_id = blockIdx.x * blockDim.x + threadIdx.x;
    int start, stop, index, strideB, row_size_Ct = 0;

    if (global_id < m)
    {
        start = d_csrRowPtrA[global_id];
        stop  = d_csrRowPtrA[global_id + 1];

        for (int i = start; i < stop; i++)
        {
            index = d_csrColIndA[i];
            // length of the matching row of B
            strideB = d_csrRowPtrB[index + 1] - d_csrRowPtrB[index];
            row_size_Ct += strideB;
        }

        d_csrRowPtrCt[global_id] = row_size_Ct;
    }

    if (global_id == 0)
        d_csrRowPtrCt[m] = 0;
}
// Launches the nnzCt kernel once purely to warm up the device (JIT/clock
// ramp-up) before timed runs. Same launch as compute_nnzCt() but without
// copying the result back to the host.
// Returns BHSPARSE_SUCCESS, or -1 if the launch failed.
int bhsparse_cuda::warmup()
{
    hipError_t err = hipSuccess;

    int num_threads = GROUPSIZE_256;
    int num_blocks  = ceil((double)_m / (double)num_threads);

    hipLaunchKernelGGL(( compute_nnzCt_cudakernel), dim3(num_blocks), dim3(num_threads) , 0, 0,  _d_csrRowPtrA,
                                                              _d_csrColIndA,
                                                              _d_csrRowPtrB,
                                                              _d_csrRowPtrCt,
                                                              _m);
    err = hipGetLastError();
    if (err != hipSuccess)
    { cout << "err = " << hipGetErrorString(err) << endl; return -1; }

    return BHSPARSE_SUCCESS;
}
int bhsparse_cuda::kernel_barrier()
{
    // Block the host until every previously launched kernel has finished,
    // and surface the resulting runtime status code to the caller.
    hipError_t status = hipDeviceSynchronize();
    return status;
}
// Runs the per-row upper-bound kernel and copies the resulting row sizes
// of Ct back into the host buffer registered in initData(). The host is
// then expected to scan these counts to size Ct (see create_Ct()).
// Returns BHSPARSE_SUCCESS, or -1 if the kernel launch failed.
int bhsparse_cuda::compute_nnzCt()
{
    hipError_t err = hipSuccess;

    int num_threads = GROUPSIZE_256;
    int num_blocks  = ceil((double)_m / (double)num_threads);

    hipLaunchKernelGGL(( compute_nnzCt_cudakernel), dim3(num_blocks), dim3(num_threads) , 0, 0,  _d_csrRowPtrA,
                                                              _d_csrColIndA,
                                                              _d_csrRowPtrB,
                                                              _d_csrRowPtrCt,
                                                              _m);
    err = hipGetLastError();
    if (err != hipSuccess)
    { cout << "err = " << hipGetErrorString(err) << endl; return -1; }

    // hipMemcpy is synchronous here, so it also acts as the kernel barrier.
    checkCudaErrors(hipMemcpy(_h_csrRowPtrCt, _d_csrRowPtrCt, (_m + 1) * sizeof(index_type), hipMemcpyDeviceToHost));

    return BHSPARSE_SUCCESS;
}
// Allocates zeroed device storage for the intermediate matrix Ct, sized by
// nnzCt (computed on the host from the row bounds of compute_nnzCt()), and
// uploads the host-side statistics queue filled in by the host scheduler.
// Returns 0; allocation/copy failures are trapped by checkCudaErrors.
int bhsparse_cuda::create_Ct(int nnzCt)
{
    int err = 0;

    checkCudaErrors(hipMemcpy(_d_queue_one, _h_queue_one, TUPLE_QUEUE * _m * sizeof(index_type), hipMemcpyHostToDevice));

    _nnzCt = nnzCt;

    // create device mem of Ct
    checkCudaErrors(hipMalloc((void **)&_d_csrColIndCt, _nnzCt * sizeof(index_type)));
    checkCudaErrors(hipMalloc((void **)&_d_csrValCt,    _nnzCt * sizeof(value_type)));

    checkCudaErrors(hipMemset(_d_csrColIndCt, 0, _nnzCt * sizeof(index_type)));
    checkCudaErrors(hipMemset(_d_csrValCt,    0, _nnzCt * sizeof(value_type)));

    return err;
}
//__inline__ __device__ void
//siftDown(int *s_key,
// value_type *s_val,
// int start,
// int stop)
//{
// int root = start;
// int child, swap;
// int temp_swap_key;
// value_type temp_swap_val;
// while (root * 2 + 1 <= stop)
// {
// child = root * 2 + 1;
// swap = root;
// if (s_key[swap] < s_key[child])
// swap = child;
// if (child + 1 <= stop && s_key[swap] < s_key[child + 1])
// swap = child + 1;
// if (swap != root)
// {
// //swap root and swap
// temp_swap_key = s_key[root];
// s_key[root] = s_key[swap];
// s_key[swap] = temp_swap_key;
// temp_swap_val = s_val[root];
// s_val[root] = s_val[swap];
// s_val[swap] = temp_swap_val;
// root = swap;
// }
// else
// return;
// }
//}
//__inline__ __device__ int
//heapsort(int *s_key,
// value_type *s_val,
// int segment_size)
//{
// // heapsort - heapify max-heap
// int start = (segment_size - 1) / 2;
// int stop = segment_size - 1;
// while (start >= 0)
// {
// siftDown(s_key, s_val, start, stop);
// start--;
// }
// // inject root element to the end
// int temp_swap_key;
// value_type temp_swap_val;
// temp_swap_key = s_key[0];
// s_key[0] = s_key[stop];
// s_key[stop] = temp_swap_key;
// temp_swap_val = s_val[0];
// s_val[0] = s_val[stop];
// s_val[stop] = temp_swap_val;
// stop--;
// siftDown(s_key, s_val, 0, stop);
// // this start is compressed list's start
// start = segment_size - 1;
// // heapsort - remove-max and compress
// while (stop >= 0)
// {
// if (s_key[0] == s_key[start])
// {
// s_val[start] += s_val[0];
// s_key[0] = s_key[stop];
// s_val[0] = s_val[stop];
// }
// else
// {
// start--;
// if (stop == start)
// {
// temp_swap_key = s_key[0];
// s_key[0] = s_key[stop];
// s_key[stop] = temp_swap_key;
// temp_swap_val = s_val[0];
// s_val[0] = s_val[stop];
// s_val[stop] = temp_swap_val;
// }
// else
// {
// s_key[start] = s_key[0];
// s_val[start] = s_val[0];
// s_key[0] = s_key[stop];
// s_val[0] = s_val[stop];
// }
// }
// stop--;
// siftDown(s_key, s_val, 0, stop);
// }
// return start;
//}
//template<typename vT, int c_segmentsize>
//__global__ void
//ESC_2heap_noncoalesced(const int* d_queue,
// const int* d_csrRowPtrA,
// const int* __restrict__ d_csrColIndA,
// const vT* __restrict__ d_csrValA,
// const int* d_csrRowPtrB,
// const int* __restrict__ d_csrColIndB,
// const vT* __restrict__ d_csrValB,
// int* d_csrRowPtrC,
// const int* d_csrRowPtrCt,
// int* d_csrColIndCt,
// vT* d_csrValCt,
// const int queue_size,
// const int d_queue_offset)
//{
// __shared__ int s_key[c_segmentsize * WARPSIZE_NV_2HEAP];
// __shared__ vT s_val[c_segmentsize * WARPSIZE_NV_2HEAP];
// __shared__ char s_start[WARPSIZE_NV_2HEAP];
// __shared__ char s_count[WARPSIZE_NV_2HEAP];
// int local_id = threadIdx.x;
// int group_id = blockIdx.x;
// int global_id = blockIdx.x * blockDim.x + threadIdx.x;
// int num_groups = gridDim.x;
// int local_size = blockDim.x;
// int *s_key_local = &s_key[local_id * c_segmentsize];
// vT *s_val_local = &s_val[local_id * c_segmentsize];
// if (global_id < queue_size)
// {
// int i, counter = 0;
// int rowidC = d_queue[TUPLE_QUEUE * (d_queue_offset + global_id)];
// int start_col_index_A, stop_col_index_A;
// int rowidB, start_col_index_B, stop_col_index_B;
// vT value_A;
// start_col_index_A = d_csrRowPtrA[rowidC];
// stop_col_index_A = d_csrRowPtrA[rowidC + 1];
// // i is both col index of A and row index of B
// for (i = start_col_index_A; i < stop_col_index_A; i++)
// {
// rowidB = d_csrColIndA[i];
// value_A = d_csrValA[i];
// start_col_index_B = d_csrRowPtrB[rowidB];
// stop_col_index_B = d_csrRowPtrB[rowidB + 1];
// for (int j = start_col_index_B; j < stop_col_index_B; j++)
// {
// s_key_local[counter] = d_csrColIndB[j];
// s_val_local[counter] = d_csrValB[j] * value_A;
// counter++;
// }
// }
// // heapsort in each work-item
// int local_start = heapsort(s_key_local, s_val_local, counter);
// counter -= local_start;
// s_start[local_id] = local_start;
// s_count[local_id] = counter;
// d_csrRowPtrC[rowidC] = counter;
// }
// __syncthreads();
// // write compressed lists to global mem
// int base_index;
// int local_offset;
// int loop = group_id != num_groups - 1 ? local_size : queue_size - group_id * local_size;
// for (int i = 0; i < loop; i++)
// {
// base_index = d_queue[TUPLE_QUEUE * (d_queue_offset + group_id * local_size + i) + 1] + local_id;
// local_offset = c_segmentsize * i + local_id + s_start[i];
// if (local_id < s_count[i])
// {
// d_csrColIndCt[base_index] = s_key[local_offset];
// d_csrValCt[base_index] = s_val[local_offset];
// }
// if (local_id + local_size < s_count[i])
// {
// base_index += local_size;
// local_offset += local_size;
// d_csrColIndCt[base_index] = s_key[local_offset];
// d_csrValCt[base_index] = s_val[local_offset];
// }
// }
//}
// Sift a node down a per-thread max-heap stored strided in shared memory.
// Each of the local_size threads owns its own logical heap: node i of the
// heap belonging to thread `local_id` lives at s_key[i * local_size + local_id]
// (s_val laid out identically, and always moved together with its key).
// `start` is the node to sift from; `stop` is the last valid heap index.
__inline__ __device__ void
siftDown(int *s_key,
value_type *s_val,
const int start,
const int stop,
const int local_id,
const int local_size)
{
int root = start;
int child, swap;
int temp_swap_key;
value_type temp_swap_val;
while (root * 2 + 1 <= stop)
{
child = root * 2 + 1;
swap = root;
// pick the larger of root and its left child
if (s_key[swap * local_size + local_id] < s_key[child * local_size + local_id])
swap = child;
// ...then compare against the right child, if it exists
if (child + 1 <= stop && s_key[swap * local_size + local_id] < s_key[(child + 1) * local_size + local_id])
swap = child + 1;
if (swap != root)
{
const int index1 = root * local_size + local_id;
const int index2 = swap * local_size + local_id;
//swap root and swap
temp_swap_key = s_key[index1];
s_key[index1] = s_key[index2];
s_key[index2] = temp_swap_key;
temp_swap_val = s_val[index1];
s_val[index1] = s_val[index2];
s_val[index2] = temp_swap_val;
root = swap;
}
else
// heap property restored below `root`; nothing further to do
return;
}
}
// Heapsort the per-thread strided list of `segment_size` (key, val) pairs
// (layout as in siftDown: entry i at [i * local_size + local_id]) while
// merging duplicate keys on the fly. On return the sorted, duplicate-free
// run occupies logical indices [start, segment_size); the returned `start`
// is the first index of that compressed run.
__inline__ __device__ int
heapsort(int *s_key,
value_type *s_val,
const int segment_size,
const int local_id,
const int local_size)
{
// heapsort - heapify max-heap
int start = (segment_size - 1) / 2;
int stop = segment_size - 1;
int index1, index2;
while (start >= 0)
{
siftDown(s_key, s_val, start, stop, local_id, local_size);
start--;
}
// inject root element to the end
int temp_swap_key;
value_type temp_swap_val;
index1 = stop * local_size + local_id;
temp_swap_key = s_key[local_id];
s_key[local_id] = s_key[index1];
s_key[index1] = temp_swap_key;
temp_swap_val = s_val[local_id];
s_val[local_id] = s_val[index1];
s_val[index1] = temp_swap_val;
stop--;
siftDown(s_key, s_val, 0, stop, local_id, local_size);
// this start is compressed list's start
start = segment_size - 1;
// heapsort - remove-max and compress
while (stop >= 0)
{
index2 = stop * local_size + local_id;
if (s_key[local_id] == s_key[start * local_size + local_id])
{
// current max equals the key already at the compressed head:
// accumulate its value instead of emitting a new entry
s_val[start * local_size + local_id] += s_val[local_id];
s_key[local_id] = s_key[index2];
s_val[local_id] = s_val[index2];
}
else
{
// new distinct key: grow the compressed region by one slot
start--;
index1 = start * local_size + local_id;
if (stop == start)
{
// compressed region has met the heap region: plain swap
temp_swap_key = s_key[local_id];
s_key[local_id] = s_key[index2];
s_key[index2] = temp_swap_key;
temp_swap_val = s_val[local_id];
s_val[local_id] = s_val[index2];
s_val[index2] = temp_swap_val;
}
else
{
// move max to the compressed slot, refill root from the heap tail
s_key[index1] = s_key[local_id];
s_val[index1] = s_val[local_id];
s_key[local_id] = s_key[index2];
s_val[local_id] = s_val[index2];
}
}
stop--;
siftDown(s_key, s_val, 0, stop, local_id, local_size);
}
return start;
}
// ESC (Expansion, Sorting, Compression) kernel using a per-thread 2-heap.
// One thread computes one row of C whose intermediate product contains at
// most c_segmentsize entries. Each thread's scratch list is stored strided
// across shared memory (entry i at [i * blockDim.x + threadIdx.x]); the
// d_queue tuples supply, per row, the target row id (entry 0) and the
// write offset into Ct (entry 1).
// NOTE(review): shared arrays are sized with WARPSIZE_NV_2HEAP — presumably
// blockDim.x <= WARPSIZE_NV_2HEAP is required; confirm against the launcher.
template<typename vT, int c_segmentsize>
__global__ void
ESC_2heap_noncoalesced(const int* d_queue,
                       const int* d_csrRowPtrA,
                       const int* __restrict__ d_csrColIndA,
                       const vT* __restrict__ d_csrValA,
                       const int* d_csrRowPtrB,
                       const int* __restrict__ d_csrColIndB,
                       const vT* __restrict__ d_csrValB,
                       int* d_csrRowPtrC,
                       const int* d_csrRowPtrCt,
                       int* d_csrColIndCt,
                       vT* d_csrValCt,
                       const int queue_size,
                       const int d_queue_offset)
{
    __shared__ int s_key[c_segmentsize * WARPSIZE_NV_2HEAP];
    __shared__ vT s_val[c_segmentsize * WARPSIZE_NV_2HEAP];

    const int local_id = threadIdx.x;
    const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
    const int local_size = blockDim.x;

    if (global_id < queue_size)
    {
        int counter = 0;
        // Row of C this thread is responsible for.
        const int rowidC = d_queue[TUPLE_QUEUE * (d_queue_offset + global_id)];

        const int start_col_index_A = d_csrRowPtrA[rowidC];
        const int stop_col_index_A = d_csrRowPtrA[rowidC + 1];

        // Expansion: for every nonzero A(rowidC, rowidB), scale row rowidB
        // of B and append the products to this thread's strided list.
        for (int i = start_col_index_A; i < stop_col_index_A; i++)
        {
            const int rowidB = d_csrColIndA[i];
            const vT value_A = d_csrValA[i];

            const int start_col_index_B = d_csrRowPtrB[rowidB];
            const int stop_col_index_B = d_csrRowPtrB[rowidB + 1];

            for (int j = start_col_index_B; j < stop_col_index_B; j++)
            {
                const int index = counter * local_size + local_id;
                s_key[index] = d_csrColIndB[j];
                s_val[index] = d_csrValB[j] * value_A;
                counter++;
            }
        }

        // Sorting + compression: heapsort merges duplicate column indices in
        // place; entries [local_start, counter) hold the compressed result.
        const int local_start = heapsort(s_key, s_val, counter, local_id, local_size);
        counter -= local_start;
        d_csrRowPtrC[rowidC] = counter;

        // Scatter the compressed row to Ct at the precomputed offset.
        // (group_id * local_size + local_id is exactly global_id; a stray
        // double semicolon on this line was also removed.)
        const int base_index = d_queue[TUPLE_QUEUE * (d_queue_offset + global_id) + 1];
        for (int i = 0; i < counter; i++)
        {
            d_csrColIndCt[base_index + i] = s_key[(local_start + i) * local_size + local_id];
            d_csrValCt[base_index + i] = s_val[(local_start + i) * local_size + local_id];
        }
    }
}
// Launch the templated ESC_2heap kernel for segment size j.
// The segment size is a template parameter of the kernel (it sizes the
// shared-memory scratch arrays), so each supported value 2..32 needs its
// own instantiation; the macro below keeps the 31 launches identical
// apart from that compile-time constant.
// Returns BHSPARSE_SUCCESS, or -1 on launch error or unsupported j.
int bhsparse_cuda::compute_nnzC_Ct_2heap_noncoalesced(int num_threads, int num_blocks, int j, int counter, int position)
{
    hipError_t err = hipSuccess;

    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }

#define BHSPARSE_ESC_2HEAP_CASE(SEGSIZE)                                               \
    case SEGSIZE:                                                                      \
        hipLaunchKernelGGL(( ESC_2heap_noncoalesced<value_type, SEGSIZE> ),            \
                           dim3(num_blocks), dim3(num_threads), 0, 0,                  \
                           _d_queue_one,                                               \
                           _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,                   \
                           _d_csrRowPtrB, _d_csrColIndB, _d_csrValB,                   \
                           _d_csrRowPtrC, _d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt, \
                           counter, position);                                         \
        break;

    switch (j)
    {
    BHSPARSE_ESC_2HEAP_CASE(2)
    BHSPARSE_ESC_2HEAP_CASE(3)
    BHSPARSE_ESC_2HEAP_CASE(4)
    BHSPARSE_ESC_2HEAP_CASE(5)
    BHSPARSE_ESC_2HEAP_CASE(6)
    BHSPARSE_ESC_2HEAP_CASE(7)
    BHSPARSE_ESC_2HEAP_CASE(8)
    BHSPARSE_ESC_2HEAP_CASE(9)
    BHSPARSE_ESC_2HEAP_CASE(10)
    BHSPARSE_ESC_2HEAP_CASE(11)
    BHSPARSE_ESC_2HEAP_CASE(12)
    BHSPARSE_ESC_2HEAP_CASE(13)
    BHSPARSE_ESC_2HEAP_CASE(14)
    BHSPARSE_ESC_2HEAP_CASE(15)
    BHSPARSE_ESC_2HEAP_CASE(16)
    BHSPARSE_ESC_2HEAP_CASE(17)
    BHSPARSE_ESC_2HEAP_CASE(18)
    BHSPARSE_ESC_2HEAP_CASE(19)
    BHSPARSE_ESC_2HEAP_CASE(20)
    BHSPARSE_ESC_2HEAP_CASE(21)
    BHSPARSE_ESC_2HEAP_CASE(22)
    BHSPARSE_ESC_2HEAP_CASE(23)
    BHSPARSE_ESC_2HEAP_CASE(24)
    BHSPARSE_ESC_2HEAP_CASE(25)
    BHSPARSE_ESC_2HEAP_CASE(26)
    BHSPARSE_ESC_2HEAP_CASE(27)
    BHSPARSE_ESC_2HEAP_CASE(28)
    BHSPARSE_ESC_2HEAP_CASE(29)
    BHSPARSE_ESC_2HEAP_CASE(30)
    BHSPARSE_ESC_2HEAP_CASE(31)
    BHSPARSE_ESC_2HEAP_CASE(32)
    default:
        // Previously an out-of-range j fell through silently (no kernel
        // launched, BHSPARSE_SUCCESS returned); report it instead.
        cout << "err = unsupported ESC_2heap segment size j = " << j << endl;
        return -1;
    }

#undef BHSPARSE_ESC_2HEAP_CASE

    err = hipGetLastError();
    if (err != hipSuccess)
    { cout << "err = " << hipGetErrorString(err) << endl; return -1; }

    if (_profiling)
    {
        // Synchronize only when profiling, so timings cover kernel execution.
        err = hipDeviceSynchronize();
        if (err != hipSuccess)
        { cout << "err = " << hipGetErrorString(err) << endl; return -1; }

        sdkStopTimer(&timer);
        cout << "[ " << j << " ] ESC_2heap time: " << sdkGetTimerValue(&timer) << " ms" << endl;
    }

    return BHSPARSE_SUCCESS;
}
// Compare-and-exchange: if the (keyA, keyB) pair is out of order with
// respect to the sorting direction `dir`, swap both keys and values so the
// pair ends up ordered. Building block of the odd-even sorting network.
__inline__ __device__
void coex(int *keyA,
          value_type *valA,
          int *keyB,
          value_type *valB,
          const int dir)
{
    const bool out_of_order = ((*keyA > *keyB) == dir);
    if (out_of_order)
    {
        const int tmp_key = *keyA;
        *keyA = *keyB;
        *keyB = tmp_key;

        const value_type tmp_val = *valA;
        *valA = *valB;
        *valB = tmp_val;
    }
}
// In-place odd-even merge sort of `arrayLength` (key, val) pairs held in
// shared memory, ascending (dir = 1). Each thread handles two elements per
// stage, so it appears to assume arrayLength == 2 * blockDim.x and a
// power-of-two length — TODO confirm against callers. Contains
// __syncthreads(): must be reached by all threads of the block.
__inline__ __device__
void oddeven(int *s_key,
value_type *s_val,
int arrayLength)
{
int dir = 1;
// outer loop: merge sorted runs of increasing size
for (int size = 2; size <= arrayLength; size <<= 1)
{
int stride = size >> 1;
int offset = threadIdx.x & (stride - 1);
{
__syncthreads();
// first step of the merge: full-stride compare-exchange
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
coex(&s_key[pos], &s_val[pos], &s_key[pos + stride], &s_val[pos + stride], dir);
stride >>= 1;
}
// remaining steps with halving strides
for (; stride > 0; stride >>= 1)
{
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
if (offset >= stride)
coex(&s_key[pos - stride], &s_val[pos - stride], &s_key[pos], &s_val[pos], dir);
}
}
}
// Inclusive prefix sum of x across one warp using shuffle-up; `local_id` is
// the lane index within the warp. Note: uses the mask-less __shfl_up, which
// is the HIP form of the intrinsic (legacy/pre-Volta form on CUDA).
template<typename T>
__inline__ __device__
T scan_32_shfl(T x, const int local_id)
{
#pragma unroll
for( int offset = 1 ; offset < WARPSIZE_NV ; offset <<= 1 )
{
// pull the partial sum from the lane `offset` positions below
T y = __shfl_up(x, offset);
if(local_id >= offset)
x += y;
}
return x;
}
// Serial in-place exclusive prefix sum over s_scan[0..l-1], performed by
// thread 0 only (all other threads fall through; caller is responsible for
// any barrier). After the call, s_scan[i] holds the sum of the original
// elements [0, i).
template<typename T>
__inline__ __device__
void scan_single( volatile T *s_scan,
const int local_id,
const int l)
{
T old_val, new_val;
if (!local_id)
{
old_val = s_scan[0];
s_scan[0] = 0;
for (int i = 1; i < l; i++)
{
// old_val carries the original s_scan[i-1] before it was overwritten
new_val = s_scan[i];
s_scan[i] = old_val + s_scan[i-1];
old_val = new_val;
}
}
}
// Block-wide exclusive scan built from warp shuffles. Each thread
// contributes r_in; the function returns that thread's exclusive prefix.
// s_scan receives the per-warp inclusive totals shifted by one ("plus1":
// warp i's total lands in s_scan[i + 1]); seg_num is the number of warps.
// Contains __syncthreads(): all threads of the block must call this.
template<typename T>
__inline__ __device__
T scan_plus1_shfl(volatile T *s_scan,
const int local_id,
T r_in,
const int seg_num)
{
// 3-stage method. scan-scan-propogate
// shfl version
const int lane_id = local_id % WARPSIZE_NV;
const int seg_id = local_id / WARPSIZE_NV;
// stage 1. thread bunch scan
T r_scan = 0;
//if (seg_id < seg_num)
//{
r_scan = scan_32_shfl<T>(r_in, lane_id);
// last lane publishes the warp's inclusive total
if (lane_id == WARPSIZE_NV - 1)
s_scan[seg_id] = r_scan;
// convert this thread's inclusive result to exclusive
r_scan = __shfl_up(r_scan, 1);
r_scan = lane_id ? r_scan : 0;
//}
__syncthreads();
// stage 2. one thread bunch scan
r_in = (local_id < seg_num) ? s_scan[local_id] : 0;
if (!seg_id)
r_in = scan_32_shfl<T>(r_in, lane_id);
if (local_id < seg_num)
s_scan[local_id + 1] = r_in;
// single thread in-place scan
//scan_single<T>(s_scan, local_id, seg_num+1);
__syncthreads();
// stage 3. propogate (element-wise add) to all
if (seg_id) // && seg_id < seg_num)
r_scan += s_scan[seg_id];
return r_scan;
}
// Scan two block-width sequences at once: r_in covers logical positions
// [0, blockDim.x) and r_in_halfwidth covers [blockDim.x, 2*blockDim.x).
// Exclusive results are written to s_scan at those positions, with the
// grand total stored in s_scan[2 * blockDim.x]. s_scan_shfl is scratch for
// the 2*seg_num per-warp totals (seg_num = number of warps). Contains
// __syncthreads(): all threads of the block must call this.
template<typename sT, typename T>
__inline__ __device__
void scan_double_width_plus1_shfl(volatile sT *s_scan,
volatile T *s_scan_shfl,
const int local_id,
T r_in,
T r_in_halfwidth,
const int seg_num)
{
// 3-stage method. scan-scan-propogate
// shfl version
const int lane_id = local_id % WARPSIZE_NV;
const int seg_id = local_id / WARPSIZE_NV;
// stage 1. thread bunch scan
T r_scan = scan_32_shfl<T>(r_in, lane_id);
T r_scan_halfwidth = scan_32_shfl<T>(r_in_halfwidth, lane_id);
// last lane of each warp publishes both warp totals; the half-width
// totals occupy the second seg_num slots of s_scan_shfl
if (lane_id == WARPSIZE_NV - 1)
{
s_scan_shfl[seg_id] = r_scan;
s_scan_shfl[seg_id + seg_num] = r_scan_halfwidth;
}
// inclusive to exclusive
r_scan = __shfl_up(r_scan, 1);
r_scan_halfwidth = __shfl_up(r_scan_halfwidth, 1);
r_scan = lane_id ? r_scan : 0;
r_scan_halfwidth = lane_id ? r_scan_halfwidth : 0;
__syncthreads();
// stage 2. one thread bunch scan
r_in = (local_id < 2 * seg_num) ? s_scan_shfl[local_id] : 0;
if (!seg_id)
r_in = scan_32_shfl<T>(r_in, lane_id);
if (local_id < 2 * seg_num)
s_scan_shfl[local_id + 1] = r_in;
// single thread in-place scan
//scan_single<T>(s_scan_shfl, local_id, seg_num+1);
__syncthreads();
// stage 3. propogate (element-wise add) to all
if (seg_id)
{
r_scan += s_scan_shfl[seg_id];
}
// the half-width sequence always needs the offset of the first half
r_scan_halfwidth += s_scan_shfl[seg_id + seg_num];
s_scan[local_id] = r_scan;
s_scan[local_id + blockDim.x] = r_scan_halfwidth;
if (!local_id)
s_scan[2 * blockDim.x] = s_scan_shfl[2 * seg_num];
return;
}
// In-place exclusive prefix sum (Blelloch up-sweep/down-sweep) over 32
// shorts in shared memory, executed by the first 16 threads of the block;
// the inclusive total of all 32 inputs is stored in s_scan[32]. No
// __syncthreads needed: all activity fits within one warp — presumably
// relies on warp-synchronous execution; confirm on the target architecture.
__inline__ __device__
void scan_32(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 16) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root: save total to s_scan[32], zero the root, start down-sweep
if (threadIdx.x == 0) { s_scan[31] += s_scan[15]; s_scan[32] = s_scan[31]; s_scan[31] = 0; temp = s_scan[15]; s_scan[15] = 0; s_scan[31] += temp; }
// down-sweep phase
if (threadIdx.x < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
// In-place exclusive prefix sum (Blelloch) over 64 shorts in shared memory,
// executed by the first 32 threads; inclusive total stored in s_scan[64].
// The single __syncthreads separates the 32-thread step from the
// warp-sized remainder.
__inline__ __device__
void scan_64(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 32) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 16) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root: save total to s_scan[64], zero the root, start down-sweep
if (threadIdx.x == 0) { s_scan[63] += s_scan[31]; s_scan[64] = s_scan[63]; s_scan[63] = 0; temp = s_scan[31]; s_scan[31] = 0; s_scan[63] += temp; }
// down-sweep phase
if (threadIdx.x < 2) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 32) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
}
// In-place exclusive prefix sum (Blelloch) over 128 shorts in shared
// memory, executed by the first 64 threads; inclusive total stored in
// s_scan[128]. __syncthreads guards the steps involving more than one warp.
__inline__ __device__
void scan_128(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 64) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 32) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 16) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root: save total to s_scan[128], zero the root, start down-sweep
if (threadIdx.x == 0) { s_scan[127] += s_scan[63]; s_scan[128] = s_scan[127]; s_scan[127] = 0; temp = s_scan[63]; s_scan[63] = 0; s_scan[127] += temp; }
// down-sweep phase
if (threadIdx.x < 2) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 32) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 64) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
}
// In-place exclusive prefix sum (Blelloch) over 256 shorts in shared
// memory, executed by the first 128 threads; inclusive total stored in
// s_scan[256]. __syncthreads guards the steps involving more than one warp.
__inline__ __device__
void scan_256(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 128) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 64) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 32) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 16) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 64 * baseai - 1; bi = 64 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root: save total to s_scan[256], zero the root, start down-sweep
if (threadIdx.x == 0) { s_scan[255] += s_scan[127]; s_scan[256] = s_scan[255]; s_scan[255] = 0; temp = s_scan[127]; s_scan[127] = 0; s_scan[255] += temp; }
// down-sweep phase
if (threadIdx.x < 2) { ai = 64 * baseai - 1; bi = 64 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 32) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 64) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 128) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
}
// In-place exclusive prefix sum (Blelloch) over 512 shorts in shared
// memory, executed by the first 256 threads; inclusive total stored in
// s_scan[512]. __syncthreads guards the steps involving more than one warp.
__inline__ __device__
void scan_512(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 256) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 128) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 64) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 32) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 16) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 64 * baseai - 1; bi = 64 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 128 * baseai - 1; bi = 128 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root: save total to s_scan[512], zero the root, start down-sweep
if (threadIdx.x == 0) { s_scan[511] += s_scan[255]; s_scan[512] = s_scan[511]; s_scan[511] = 0; temp = s_scan[255]; s_scan[255] = 0; s_scan[511] += temp; }
// down-sweep phase
if (threadIdx.x < 2) { ai = 128 * baseai - 1; bi = 128 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 64 * baseai - 1; bi = 64 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 32) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 64) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 128) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 256) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
}
// Compress sorted (key, val) pairs held in shared memory: runs of equal
// keys are merged (values summed) and run heads are compacted to the front
// of s_key/s_val. Each thread handles two logical positions: local_id and
// local_id_halfwidth (second half of the 2*local_size-wide buffer).
// s_scan receives the exclusive scan of the "is run head" flags and is
// expected to hold 2*local_size + 1 entries (the scan helpers above write
// the total at index 2*local_size). Contains __syncthreads(): all threads
// of the block must call this.
//
// Fix vs. original: both duplicate-accumulation loops now test the bound
// BEFORE dereferencing s_scan[move_pointer + 1]; the half-width loop
// previously read past the scan buffer before its bound check, and the
// first-half loop had no bound at all.
__inline__ __device__
void compression_scan(volatile short *s_scan,
                      volatile int *s_scan_shfl,
                      int *s_key,
                      value_type *s_val,
                      const int local_counter,
                      const int local_size,
                      const int local_id,
                      const int local_id_halfwidth)
{
    // compression - prefix sum
    // An entry is the head of a run iff its key differs from its left
    // neighbour; out-of-range entries default to "head" (flag = 1).
    bool duplicate = 1;
    bool duplicate_halfwidth = 1;
    // generate bool value in registers
    if (local_id < local_counter && local_id > 0)
        duplicate = (s_key[local_id] != s_key[local_id - 1]);
    if (local_id_halfwidth < local_counter)
        duplicate_halfwidth = (s_key[local_id_halfwidth] != s_key[local_id_halfwidth - 1]);

#if __CUDA_ARCH__ >= 300
    // shuffle-based double-width scan of the head flags
    scan_double_width_plus1_shfl<short, int>(s_scan, s_scan_shfl, local_id,
                                             duplicate, duplicate_halfwidth, local_size/WARPSIZE_NV);
#else
    // copy bool values from register to local memory (s_scan)
    s_scan[local_id] = duplicate;
    s_scan[local_id_halfwidth] = duplicate_halfwidth;
    __syncthreads();
    // in-place exclusive prefix-sum scan on s_scan
    switch (local_size)
    {
    case 16:
        scan_32(s_scan);
        break;
    case 32:
        scan_64(s_scan);
        break;
    case 64:
        scan_128(s_scan);
        break;
    case 128:
        scan_256(s_scan);
        break;
    case 256:
        scan_512(s_scan);
        break;
    }
#endif
    __syncthreads();

    // compute final position and final value in registers
    int move_pointer;
    short final_position, final_position_halfwidth;
    int final_key, final_key_halfwidth;
    value_type final_value, final_value_halfwidth;

    // A run head accumulates the values of the duplicates that follow it;
    // a following entry is a duplicate iff the scan value does not grow.
    if (local_id < local_counter && duplicate == 1)
    {
        final_position = s_scan[local_id];
        final_key = s_key[local_id];
        final_value = s_val[local_id];
        move_pointer = local_id + 1;
        // bound check first so s_scan[move_pointer + 1] stays in range
        while (move_pointer < 2 * local_size &&
               s_scan[move_pointer] == s_scan[move_pointer + 1])
        {
            final_value += s_val[move_pointer];
            move_pointer++;
        }
    }
    if (local_id_halfwidth < local_counter && duplicate_halfwidth == 1)
    {
        final_position_halfwidth = s_scan[local_id_halfwidth];
        final_key_halfwidth = s_key[local_id_halfwidth];
        final_value_halfwidth = s_val[local_id_halfwidth];
        move_pointer = local_id_halfwidth + 1;
        // original tested the bound AFTER the dereference — reordered
        while (move_pointer < 2 * local_size &&
               s_scan[move_pointer] == s_scan[move_pointer + 1])
        {
            final_value_halfwidth += s_val[move_pointer];
            move_pointer++;
        }
    }
    __syncthreads();

    // write final_positions and final_values to s_key and s_val
    if (local_id < local_counter && duplicate == 1)
    {
        s_key[final_position] = final_key;
        s_val[final_position] = final_value;
    }
    if (local_id_halfwidth < local_counter && duplicate_halfwidth == 1)
    {
        s_key[final_position_halfwidth] = final_key_halfwidth;
        s_val[final_position_halfwidth] = final_value_halfwidth;
    }
}
// Expand-Sort-Compress (ESC) kernel: one thread block per queued row of C.
// For every nonzero A(row, i) it loads row i of B scaled by that value into
// shared memory, pads the list with key `n` up to the power-of-two width
// (2 * blockDim.x), sorts it (oddeven), compresses duplicate columns
// (compression_scan), then writes the compressed column/value lists to
// d_csrColIndCt/d_csrValCt and the row's nonzero count to d_csrRowPtrC.
// Template parameter c_scansize must equal blockDim.x.
// Queue entry layout: slot 0 = row id, slot 1 = output offset into Ct.
template<typename vT, int c_scansize>
__global__
void ESC_bitonic_scan(const int* d_queue,
const int* d_csrRowPtrA,
const int* __restrict__ d_csrColIndA,
const vT* __restrict__ d_csrValA,
const int* d_csrRowPtrB,
const int* d_csrColIndB,
const vT* d_csrValB,
int* d_csrRowPtrC,
int* d_csrColIndCt,
vT* d_csrValCt,
const int queue_offset,
const int n)
{
__shared__ int s_key[2 * c_scansize];
__shared__ vT s_val[2 * c_scansize];
__shared__ short s_scan[2 * c_scansize + 1];
#if __CUDA_ARCH__ >= 300
volatile __shared__ int s_scan_shfl[2 * c_scansize / WARPSIZE_NV + 1];
#else
// unused on the pre-SM30 path; only passed through to compression_scan
volatile __shared__ int *s_scan_shfl;
#endif
int local_id = threadIdx.x;
int group_id = blockIdx.x;
int local_size = blockDim.x;
int width = local_size * 2;
int i, local_counter = 0;
int strideB, local_offset, global_offset;
int invalid_width;
int local_id_halfwidth = local_id + local_size;
int row_id_B; // index_type
int row_id;// index_type
row_id = d_queue[TUPLE_QUEUE * (queue_offset + group_id)];
int start_col_index_A, stop_col_index_A; // index_type
int start_col_index_B, stop_col_index_B; // index_type
vT value_A; // value_type
start_col_index_A = d_csrRowPtrA[row_id];
stop_col_index_A = d_csrRowPtrA[row_id + 1];
// i is both col index of A and row index of B
for (i = start_col_index_A; i < stop_col_index_A; i++)
{
row_id_B = d_csrColIndA[i];
value_A = d_csrValA[i];
start_col_index_B = d_csrRowPtrB[row_id_B];
stop_col_index_B = d_csrRowPtrB[row_id_B + 1];
strideB = stop_col_index_B - start_col_index_B;
// each thread copies up to two entries of B's row, scaled by value_A
if (local_id < strideB)
{
local_offset = local_counter + local_id;
global_offset = start_col_index_B + local_id;
s_key[local_offset] = d_csrColIndB[global_offset];
s_val[local_offset] = d_csrValB[global_offset] * value_A;
}
if (local_id_halfwidth < strideB)
{
local_offset = local_counter + local_id_halfwidth;
global_offset = start_col_index_B + local_id_halfwidth;
s_key[local_offset] = d_csrColIndB[global_offset];
s_val[local_offset] = d_csrValB[global_offset] * value_A;
}
local_counter += strideB;
}
__syncthreads();
invalid_width = width - local_counter;
// to meet 2^N, set the rest elements to n (number of columns of C)
// so padding sorts to the end and compresses into a single entry
if (local_id < invalid_width)
s_key[local_counter + local_id] = n;
//if (local_id_halfwidth < invalid_width)
//    s_key[local_counter + local_id_halfwidth] = n;
__syncthreads();
// bitonic sort
oddeven(s_key, s_val, width);
__syncthreads();
// compression - scan
compression_scan(s_scan, s_scan_shfl, s_key, s_val, local_counter,
local_size, local_id, local_id_halfwidth);
__syncthreads();
// s_scan[width] holds the compressed total; subtract the padding entry count
local_counter = s_scan[width] - invalid_width;
if (local_id == 0)
d_csrRowPtrC[row_id] = local_counter;
// write compressed lists to global mem
int row_offset = d_queue[TUPLE_QUEUE * (queue_offset + group_id) + 1]; //d_csrRowPtrCt[row_id];
if (local_id < local_counter)
{
global_offset = row_offset + local_id;
d_csrColIndCt[global_offset] = s_key[local_id];
d_csrValCt[global_offset] = s_val[local_id];
}
if (local_id_halfwidth < local_counter)
{
global_offset = row_offset + local_id_halfwidth;
d_csrColIndCt[global_offset] = s_key[local_id_halfwidth];
d_csrValCt[global_offset] = s_val[local_id_halfwidth];
}
}
// Launches the ESC bitonic kernel for one bin of queued rows starting at
// queue `position`, picking the template instantiation whose scan size
// matches num_threads (16..256; other values launch nothing). Optionally
// times the launch when profiling is enabled (j is only used as a label).
// Returns BHSPARSE_SUCCESS, or -1 on a HIP error.
int bhsparse_cuda::compute_nnzC_Ct_bitonic(int num_threads, int num_blocks, int j, int position)
{
hipError_t err = hipSuccess;
StopWatchInterface *timer = NULL;
if (_profiling)
{
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
}
// c_scansize template argument must equal the launch's block size
switch (num_threads)
{
case 16:
hipLaunchKernelGGL(( ESC_bitonic_scan<value_type, 16>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one, _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt,
position, _n);
break;
case 32:
hipLaunchKernelGGL(( ESC_bitonic_scan<value_type, 32>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one, _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt,
position, _n);
break;
case 64:
hipLaunchKernelGGL(( ESC_bitonic_scan<value_type, 64>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one, _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt,
position, _n);
break;
case 128:
hipLaunchKernelGGL(( ESC_bitonic_scan<value_type, 128>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one, _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt,
position, _n);
break;
case 256:
hipLaunchKernelGGL(( ESC_bitonic_scan<value_type, 256>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one, _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt,
position, _n);
break;
}
// catch launch-configuration errors; execution errors surface at the
// hipDeviceSynchronize below (profiling builds only)
err = hipGetLastError();
if (err != hipSuccess)
{ cout << "err = " << hipGetErrorString(err) << endl; return -1; }
if (_profiling)
{
err = hipDeviceSynchronize();
if (err != hipSuccess)
{ cout << "err = " << hipGetErrorString(err) << endl; return -1; }
sdkStopTimer(&timer);
cout << "[ " << j << " ] ESC_bitonic time: " << sdkGetTimerValue(&timer) << " ms" << endl;
}
return BHSPARSE_SUCCESS;
}
// Handles the "zero nonzeros" bin: each thread takes one queue entry and
// records an empty row of C by writing 0 into d_csrRowPtrC for that row.
// Queue entry layout: slot 0 of each TUPLE_QUEUE-wide tuple is the row id.
__global__
void ESC_0_cudakernel(const int* d_queue,
                      int* d_csrRowPtrC,
                      const int queue_size,
                      const int queue_offset)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= queue_size)
        return;
    const int row = d_queue[TUPLE_QUEUE * (queue_offset + tid)];
    d_csrRowPtrC[row] = 0;
}
// Handles the "one nonzero" bin: each thread takes one queue entry whose C
// row has exactly one product. It scans the row's A entries for the first
// one whose B row is non-empty and writes that single scaled B entry to the
// Ct buffers at the precomputed offset (queue slot 1), then records a row
// length of 1 in d_csrRowPtrC.
__global__
void ESC_1_cudakernel(const int* d_queue,
                      const int* d_csrRowPtrA,
                      const int* __restrict__ d_csrColIndA,
                      const value_type* __restrict__ d_csrValA,
                      const int* d_csrRowPtrB,
                      const int* d_csrColIndB,
                      const value_type* d_csrValB,
                      int* d_csrRowPtrC,
                      const int* d_csrRowPtrCt,
                      int* d_csrColIndCt,
                      value_type* d_csrValCt,
                      const int queue_size,
                      const int queue_offset)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= queue_size)
        return;

    const int row_id = d_queue[TUPLE_QUEUE * (queue_offset + tid)];
    d_csrRowPtrC[row_id] = 1;

    // destination offset into Ct, taken from the queue tuple
    const int dst = d_queue[TUPLE_QUEUE * (queue_offset + tid) + 1]; //d_csrRowPtrCt[row_id];

    const int a_start = d_csrRowPtrA[row_id];
    const int a_stop = d_csrRowPtrA[row_id + 1];
    for (int a = a_start; a < a_stop; a++)
    {
        const int b_row = d_csrColIndA[a];
        const int b_pos = d_csrRowPtrB[b_row];
        if (b_pos == d_csrRowPtrB[b_row + 1])
            continue; // empty row of B contributes nothing
        const value_type a_val = d_csrValA[a];
        d_csrColIndCt[dst] = d_csrColIndB[b_pos];
        d_csrValCt[dst] = d_csrValB[b_pos] * a_val;
        break; // this bin has exactly one product per row
    }
}
// Launches ESC_0_cudakernel over `counter` queue entries starting at
// `position` (empty C rows). Optionally times the launch when profiling is
// enabled; j is only used as a log label.
// Returns BHSPARSE_SUCCESS, or -1 on a HIP error.
int bhsparse_cuda::compute_nnzC_Ct_0(int num_threads, int num_blocks, int j, int counter, int position)
{
    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }

    hipLaunchKernelGGL(( ESC_0_cudakernel), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one, _d_csrRowPtrC, counter, position);

    // catch launch-configuration errors right away
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
    {
        cout << "err = " << hipGetErrorString(err) << endl;
        return -1;
    }

    if (_profiling)
    {
        // synchronize so the timer measures actual kernel execution
        err = hipDeviceSynchronize();
        if (err != hipSuccess)
        {
            cout << "err = " << hipGetErrorString(err) << endl;
            return -1;
        }
        sdkStopTimer(&timer);
        cout << "[ " << j << " ] ESC_0 time: " << sdkGetTimerValue(&timer) << " ms" << endl;
    }
    return BHSPARSE_SUCCESS;
}
// Launches ESC_1_cudakernel over `counter` queue entries starting at
// `position` (C rows with exactly one nonzero). Optionally times the launch
// when profiling is enabled; j is only used as a log label.
// Returns BHSPARSE_SUCCESS, or -1 on a HIP error.
int bhsparse_cuda::compute_nnzC_Ct_1(int num_threads, int num_blocks, int j, int counter, int position)
{
    hipError_t err = hipSuccess;
    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }
    hipLaunchKernelGGL(( ESC_1_cudakernel), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one,
                       _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
                       _d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
                       _d_csrRowPtrC, _d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt,
                       counter, position);
    // catch launch-configuration errors right away
    err = hipGetLastError();
    if (err != hipSuccess)
    { cout << "err = " << hipGetErrorString(err) << endl; return -1; }
    if (_profiling)
    {
        err = hipDeviceSynchronize();
        if (err != hipSuccess)
        { cout << "err = " << hipGetErrorString(err) << endl; return -1; }
        sdkStopTimer(&timer);
        // fixed: this timer measures ESC_1, but the label previously said
        // "ESC_0" (copy-paste from compute_nnzC_Ct_0), mislabeling the
        // profiling output
        cout << "[ " << j << " ] ESC_1 time: " << sdkGetTimerValue(&timer) << " ms" << endl;
    }
    return BHSPARSE_SUCCESS;
}
// Binary-searches `key_input` in the sorted array s_key[0 .. merged_size)
// and, if found, subtracts `val_input` from the matching value. Used to roll
// back a speculative `binarysearch` accumulation when a merge round must be
// aborted and retried.
__inline__ __device__
void binarysearch_sub(int *s_key,
                      value_type *s_val,
                      int key_input,
                      value_type val_input,
                      int merged_size)
{
    int lo = 0;
    int hi = merged_size - 1;
    while (lo <= hi)
    {
        const int mid = (lo + hi) / 2;
        const int mid_key = s_key[mid];
        if (mid_key < key_input)
            lo = mid + 1;
        else if (mid_key > key_input)
            hi = mid - 1;
        else
        {
            // atomicAdd is not needed since duplicate is not existed in each input row
            s_val[mid] -= val_input;
            return;
        }
    }
}
// Binary-searches `key_input` in the sorted array s_key[0 .. merged_size).
// On a hit, adds `val_input` to the matching value and clears *is_new_col
// (the column already exists in the merged list). On a miss, s_key/s_val and
// *is_new_col are left untouched.
__inline__ __device__
void binarysearch(int *s_key,
                  value_type *s_val,
                  int key_input,
                  value_type val_input,
                  int merged_size,
                  bool *is_new_col)
{
    int lo = 0;
    int hi = merged_size - 1;
    while (lo <= hi)
    {
        const int mid = (lo + hi) / 2;
        const int mid_key = s_key[mid];
        if (mid_key < key_input)
            lo = mid + 1;
        else if (mid_key > key_input)
            hi = mid - 1;
        else
        {
            // atomicAdd is not needed since duplicate is not existed in each input row
            s_val[mid] += val_input;
            *is_new_col = 0;
            return;
        }
    }
}
// Dispatches the in-place exclusive prefix-sum over s_scan to the variant
// matching the current block size (32..512 threads; other sizes do nothing).
__inline__ __device__
void scan(volatile short *s_scan)
{
    const unsigned int threads = blockDim.x;
    if (threads == 32)
        scan_32(s_scan);
    else if (threads == 64)
        scan_64(s_scan);
    else if (threads == 128)
        scan_128(s_scan);
    else if (threads == 256)
        scan_256(s_scan);
    else if (threads == 512)
        scan_512(s_scan);
}
// Strict less-than comparator used by the merge-path routines.
__inline__ __device__
bool comp(int a, int b)
{
    return a < b;
}
// Merge-path diagonal search: given sorted arrays a (length aCount) and b
// (length bCount), returns how many elements of `a` precede cross-diagonal
// `diag` in their merge. The caller derives the matching b-offset as
// diag - result.
__inline__ __device__
int mergepath_partition(int *a,
                        const int aCount,
                        int *b,
                        const int bCount,
                        const int diag)
{
    int lo = max(0, diag - bCount);
    int hi = min(diag, aCount);
    while (lo < hi)
    {
        const int mid = (lo + hi) >> 1;
        // take from `a` while a[mid] < b[diag - 1 - mid]
        const bool take_a = comp(a[mid], b[diag - 1 - mid]);
        if (take_a)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}
// Serial phase of the merge-path merge: pulls exactly VT elements from the
// two sorted subranges [aBegin, aEnd) and [bBegin, bEnd) of s_key/s_val into
// the per-thread register arrays reg_key/reg_val. An element is taken from A
// when B is exhausted, or when A is non-empty and key_a <= key_b (stable
// with respect to A).
// NOTE(review): keys are pre-loaded and refreshed via s_key[++aBegin] /
// s_key[++bBegin], so the final iteration can read one slot past the
// consumed range; presumably absorbed by the +1 padding slot callers declare
// on their shared arrays — confirm before shrinking those buffers.
__inline__ __device__
void mergepath_serialmerge(int *s_key,
value_type *s_val,
int aBegin,
const int aEnd,
int bBegin,
const int bEnd,
int *reg_key,
value_type *reg_val,
const int VT)
{
int key_a = s_key[aBegin];
int key_b = s_key[bBegin];
bool p;
for(int i = 0; i < VT; ++i)
{
// p == true selects the A-side element
p = (bBegin >= bEnd) || ((aBegin < aEnd) && !comp(key_b, key_a));
reg_key[i] = p ? key_a : key_b;
reg_val[i] = p ? s_val[aBegin] : s_val[bBegin];
if(p)
key_a = s_key[++aBegin];
else
key_b = s_key[++bBegin];
}
}
// Block-cooperative copy of a merged (key, value) list of length
// `merged_size` between shared and global memory at offset `row_offset`.
// is_write == true dumps shared -> global; false loads global -> shared.
// All threads of the block must call this together.
__inline__ __device__
void readwrite_mergedlist(int *d_csrColIndCt,
                          value_type *d_csrValCt,
                          int *s_key_merged,
                          value_type *s_val_merged,
                          const int merged_size,
                          const int row_offset,
                          const bool is_write)
{
    const int rounds = ceil((float)merged_size / (float)blockDim.x);
    for (int r = 0; r < rounds; r++)
    {
        // last round may be partial
        const int valid = (r != rounds - 1) ? blockDim.x : merged_size - r * blockDim.x;
        const int local_pos = r * blockDim.x + threadIdx.x;
        const int global_pos = row_offset + local_pos;
        if (threadIdx.x >= valid)
            continue;
        if (is_write)
        {
            d_csrColIndCt[global_pos] = s_key_merged[local_pos];
            d_csrValCt[global_pos] = s_val_merged[local_pos];
        }
        else
        {
            s_key_merged[local_pos] = d_csrColIndCt[global_pos];
            s_val_merged[local_pos] = d_csrValCt[global_pos];
        }
    }
}
// Expand-Merge (EM) kernel: one thread block per queued row of C, for rows
// too large for the ESC kernels. Incrementally merges each scaled row of B
// into a sorted list kept in shared memory (capacity c_buffsize): columns
// already present are accumulated via binary search; new columns are
// compacted with a prefix-sum scan and folded in with a merge-path
// partition + serial merge. If the list would overflow, the partial state
// (merged size, current A index, current B index) is written back into the
// queue tuple and the partial list is dumped to d_csrColIndCt/d_csrValCt so
// a later, larger-buffer round can resume. Queue slot 2 == -1 marks the row
// as finished; queue slot 2 == 0 marks a fresh row; any other value means
// "resume from the state in slots 3-5".
template<typename vT, int c_buffsize, int c_scansize>
__global__
void EM_mergepath(int * d_queue,
const int * d_csrRowPtrA,
const int * __restrict__ d_csrColIndA,
const vT * __restrict__ d_csrValA,
const int * d_csrRowPtrB,
const int * d_csrColIndB,
const vT * d_csrValB,
int *d_csrRowPtrC,
int *d_csrColIndCt,
vT *d_csrValCt,
const int queue_offset)
{
// +1 padding slot: mergepath_serialmerge may read one element past the end
__shared__ int s_key_merged[c_buffsize+1];
__shared__ vT s_val_merged[c_buffsize+1];
#if __CUDA_ARCH__ >= 300
int seg_num = c_scansize / WARPSIZE_NV;
volatile __shared__ int s_scan[c_scansize / WARPSIZE_NV + 1];
#else
volatile __shared__ short s_scan[c_scansize+1];
#endif
const int queue_id = TUPLE_QUEUE * (queue_offset + blockIdx.x);
// if merged size equals -1, kernel return since this row is done
int merged_size = d_queue[queue_id + 2];
const int local_id = threadIdx.x; //threadIdx.x;
const int row_id = d_queue[queue_id];
const int local_size = blockDim.x;
const float local_size_value_type = local_size;
int reg_reuse1;
int col_Ct; // index_type
vT val_Ct; // value_type
vT val_A; // value_type
int start_col_index_A, stop_col_index_A; // index_type
int start_col_index_B, stop_col_index_B; // index_type
bool is_new_col;
bool is_last;
int VT, diag, mp;
int reg_key[16];
vT reg_val[16];
start_col_index_A = d_csrRowPtrA[row_id];
stop_col_index_A = d_csrRowPtrA[row_id + 1];
if (merged_size == 0)
{
// fresh row: seed the merged list with the first scaled row of B
// (already sorted, so no merge is needed for it)
is_last = false;
// read the first set of current nnzCt row to merged list
reg_reuse1 = d_csrColIndA[start_col_index_A]; // reg_reuse1 = row_id_B
val_A = d_csrValA[start_col_index_A];
start_col_index_B = d_csrRowPtrB[reg_reuse1]; // reg_reuse1 = row_id_B
stop_col_index_B = d_csrRowPtrB[reg_reuse1 + 1]; // reg_reuse1 = row_id_B
const int stride = stop_col_index_B - start_col_index_B;
const int loop = ceil((float)stride / local_size_value_type); //ceil((value_type)stride / (value_type)local_size);
start_col_index_B += local_id;
for (int k = 0; k < loop; k++)
{
reg_reuse1 = k != loop - 1 ? local_size : stride - k * local_size; // reg_reuse1 = input_size
// if merged_size + reg_reuse1 > c_buffsize, write it to global mem and return
if (merged_size + reg_reuse1 > c_buffsize)
{
// write a signal to some place, not equals -1 means next round is needed
if (local_id == 0)
{
d_queue[queue_id + 2] = merged_size;
d_queue[queue_id + 3] = start_col_index_A;
d_queue[queue_id + 4] = start_col_index_B;
}
// dump current data to global mem
reg_reuse1 = d_queue[queue_id + 1];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, s_key_merged, s_val_merged, merged_size, reg_reuse1, 1);
return;
}
if (start_col_index_B < stop_col_index_B)
{
col_Ct = d_csrColIndB[start_col_index_B];
val_Ct = d_csrValB[start_col_index_B] * val_A;
s_key_merged[merged_size + local_id] = col_Ct;
s_val_merged[merged_size + local_id] = val_Ct;
}
merged_size += reg_reuse1; // reg_reuse1 = input_size
start_col_index_B += local_size;
}
start_col_index_A++;
}
else
{
// resumed row: restore the A position and reload the partial merged
// list from global memory (old Ct offset is in queue slot 5)
is_last = true;
start_col_index_A = d_queue[queue_id + 3];
// load existing merged list
reg_reuse1 = d_queue[queue_id + 5];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, s_key_merged, s_val_merged, merged_size, reg_reuse1, 0);
}
__syncthreads();
// merge the rest of sets of current nnzCt row to the merged list
while (start_col_index_A < stop_col_index_A)
{
reg_reuse1 = d_csrColIndA[start_col_index_A]; // reg_reuse1 = row_id_B
val_A = d_csrValA[start_col_index_A];
// on the first iteration of a resumed row, continue from the saved B index
start_col_index_B = is_last ? d_queue[queue_id + 4] : d_csrRowPtrB[reg_reuse1]; // reg_reuse1 = row_id_B
is_last = false;
stop_col_index_B = d_csrRowPtrB[reg_reuse1 + 1]; // reg_reuse1 = row_id_B
const int stride = stop_col_index_B - start_col_index_B;
const int loop = ceil((float)stride / local_size_value_type); //ceil((value_type)stride / (value_type)local_size);
//int start_col_index_B_zeropoint = start_col_index_B;
start_col_index_B += local_id;
for (int k = 0; k < loop; k++)
{
__syncthreads();
is_new_col = 0;
if (start_col_index_B < stop_col_index_B)
{
col_Ct = d_csrColIndB[start_col_index_B];
val_Ct = d_csrValB[start_col_index_B] * val_A;
// binary search on existing sorted list
// if the column is existed, add the value to the position
// else, set scan value to 1, and wait for scan
is_new_col = 1;
binarysearch(s_key_merged, s_val_merged, col_Ct, val_Ct, merged_size, &is_new_col);
}
#if __CUDA_ARCH__ >= 300
//const int seg_num = (k == loop - 1) ?
//                  ceil((float)(stop_col_index_B - start_col_index_B_zeropoint) / (float)WARPSIZE_NV) :
//                  local_size / WARPSIZE_NV;
//if (!local_id)
//    printf("blockIdx = %d, seg_num = %d\n", blockIdx.x, seg_num);
int r_scan = scan_plus1_shfl<int>(s_scan, local_id, is_new_col, seg_num);
const int s_scan_sum = s_scan[seg_num];
#else
s_scan[local_id] = is_new_col;
__syncthreads();
// scan with half-local_size work-items
// s_scan[local_size] is the size of input non-duplicate array
scan(s_scan);
__syncthreads();
const int s_scan_sum = s_scan[local_size];
#endif
// if all elements are absorbed into merged list,
// the following work in this inner-loop is not needed any more
if (s_scan_sum == 0)
{
start_col_index_B += local_size;
//start_col_index_B_zeropoint += local_size;
continue;
}
// check if the total size is larger than the capicity of merged list
if (merged_size + s_scan_sum > c_buffsize)
{
// roll back 'binary serach plus' in this round
// (undo the speculative += done by binarysearch above)
if (start_col_index_B < stop_col_index_B)
{
binarysearch_sub(s_key_merged, s_val_merged, col_Ct, val_Ct, merged_size);
}
__syncthreads();
// write a signal to some place, not equals -1 means next round is needed
if (local_id == 0)
{
d_queue[queue_id + 2] = merged_size;
d_queue[queue_id + 3] = start_col_index_A;
d_queue[queue_id + 4] = start_col_index_B;
}
// dump current data to global mem
reg_reuse1 = d_queue[queue_id + 1]; //d_csrRowPtrCt[row_id];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, s_key_merged, s_val_merged, merged_size, reg_reuse1, 1);
return;
}
// write compact input to free place in merged list
if(is_new_col)
{
#if __CUDA_ARCH__ >= 300
reg_reuse1 = merged_size + r_scan;
#else
reg_reuse1 = merged_size + s_scan[local_id];
#endif
s_key_merged[reg_reuse1] = col_Ct;
s_val_merged[reg_reuse1] = val_Ct;
}
__syncthreads();
// merge path partition
// VT = elements merged per thread; reg_key/reg_val (size 16) bound VT
VT = ceil((float)(merged_size + s_scan_sum) / local_size_value_type);
diag = VT * local_id;
mp = mergepath_partition(s_key_merged, merged_size, &s_key_merged[merged_size], s_scan_sum, diag);
mergepath_serialmerge(s_key_merged, s_val_merged,
mp, merged_size, merged_size + diag - mp, merged_size + s_scan_sum,
reg_key, reg_val, VT);
__syncthreads();
for (int is = 0; is < VT; is++)
{
s_key_merged[diag + is] = reg_key[is];
s_val_merged[diag + is] = reg_val[is];
}
__syncthreads();
merged_size += s_scan_sum;
start_col_index_B += local_size;
//start_col_index_B_zeropoint += local_size;
}
start_col_index_A++;
}
__syncthreads();
if (local_id == 0)
{
d_csrRowPtrC[row_id] = merged_size;
d_queue[queue_id + 2] = -1;
}
// write merged list to global mem
reg_reuse1 = d_queue[queue_id + 1]; //d_csrRowPtrCt[row_id];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, s_key_merged, s_val_merged, merged_size, reg_reuse1, 1);
}
// Continuation of EM_mergepath for rows whose merged list no longer fits in
// shared memory. Maintains two levels: the already-merged level-2 list lives
// in global memory inside Ct (d_key_merged/d_val_merged, length
// merged_size_l2) and a level-1 staging list lives in shared memory
// (capacity c_buffsize). Each incoming element is binary-searched first in
// l2 then in l1; genuinely new columns are compacted into l1 via a
// prefix-sum scan plus merge-path merge. When l1 fills, the resume state is
// saved to the queue tuple and l1 is flushed and merged into l2 in global
// memory; the same flush-and-merge runs at normal completion, where queue
// slot 2 is set to -1 to mark the row done.
template<typename vT, int c_buffsize, int c_scansize>
__global__
void EM_mergepath_global(int * d_queue,
const int * d_csrRowPtrA,
const int * __restrict__ d_csrColIndA,
const vT * __restrict__ d_csrValA,
const int * d_csrRowPtrB,
const int * d_csrColIndB,
const vT * d_csrValB,
int *d_csrRowPtrC,
int *d_csrColIndCt,
vT *d_csrValCt,
const int queue_offset)
{
// +1 padding slot: mergepath_serialmerge may read one element past the end
__shared__ int s_key_merged_l1[c_buffsize+1];
__shared__ vT s_val_merged_l1[c_buffsize+1];
#if __CUDA_ARCH__ >= 300
const int seg_num = c_scansize / WARPSIZE_NV;
volatile __shared__ int s_scan[c_scansize / WARPSIZE_NV + 1];
#else
volatile __shared__ short s_scan[c_scansize+1];
#endif
int queue_id = TUPLE_QUEUE * (queue_offset + blockIdx.x);
// if merged size equals -1, kernel return since this row is done
int merged_size_l2 = d_queue[queue_id + 2];
int merged_size_l1 = 0;
int local_id = threadIdx.x; //threadIdx.x;
int row_id = d_queue[queue_id];
int local_size = blockDim.x;
float local_size_value_type = local_size;
int stride, loop;
int reg_reuse1;
int col_Ct; // index_type
vT val_Ct; // vT
vT val_A; // vT
int start_col_index_A, stop_col_index_A; // index_type
int start_col_index_B, stop_col_index_B; // index_type
int k, is;
bool is_new_col;
bool is_last;
int VT, diag, mp;
int reg_key[80];
vT reg_val[80];
start_col_index_A = d_csrRowPtrA[row_id];
stop_col_index_A = d_csrRowPtrA[row_id + 1];
// this kernel always resumes a row saved by EM_mergepath, so the saved
// A/B positions in queue slots 3/4 are restored unconditionally
is_last = true;
start_col_index_A = d_queue[queue_id + 3];
// load existing merged list
// queue slot 1 = new (current) Ct offset for this row; the l2 list is
// addressed relative to it
reg_reuse1 = d_queue[queue_id + 1];
int *d_key_merged = &d_csrColIndCt[reg_reuse1];
vT *d_val_merged = &d_csrValCt[reg_reuse1];
// queue slot 5 = old Ct offset holding the previously dumped partial list
reg_reuse1 = d_queue[queue_id + 5];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, d_key_merged, d_val_merged, merged_size_l2, reg_reuse1, 0);
__syncthreads();
// merge the rest of sets of current nnzCt row to the merged list
while (start_col_index_A < stop_col_index_A)
{
reg_reuse1 = d_csrColIndA[start_col_index_A]; // reg_reuse1 = row_id_B
val_A = d_csrValA[start_col_index_A];
start_col_index_B = is_last ? d_queue[queue_id + 4] : d_csrRowPtrB[reg_reuse1]; // reg_reuse1 = row_id_B
is_last = false;
stop_col_index_B = d_csrRowPtrB[reg_reuse1 + 1]; // reg_reuse1 = row_id_B
stride = stop_col_index_B - start_col_index_B;
loop = ceil(stride / local_size_value_type); //ceil((value_type)stride / (value_type)local_size);
start_col_index_B += local_id;
for (k = 0; k < loop; k++)
{
__syncthreads();
is_new_col = 0;
if (start_col_index_B < stop_col_index_B)
{
col_Ct = d_csrColIndB[start_col_index_B];
val_Ct = d_csrValB[start_col_index_B] * val_A;
// binary search on existing sorted list
// if the column is existed, add the value to the position
// else, set scan value to 1, and wait for scan
is_new_col = 1;
// search on l2
binarysearch(d_key_merged, d_val_merged, col_Ct, val_Ct, merged_size_l2, &is_new_col);
// search on l1
if (is_new_col == 1)
binarysearch(s_key_merged_l1, s_val_merged_l1, col_Ct, val_Ct, merged_size_l1, &is_new_col);
}
#if __CUDA_ARCH__ >= 300
int r_scan = scan_plus1_shfl<int>(s_scan, local_id, is_new_col, seg_num);
const int s_scan_sum = s_scan[seg_num];
#else
s_scan[local_id] = is_new_col;
__syncthreads();
// scan with half-local_size work-items
// s_scan[local_size] is the size of input non-duplicate array
scan(s_scan);
__syncthreads();
const int s_scan_sum = s_scan[local_size];
#endif
// if all elements are absorbed into merged list,
// the following work in this inner-loop is not needed any more
if (s_scan_sum == 0)
{
start_col_index_B += local_size;
continue;
}
// check if the total size is larger than the capicity of merged list
if (merged_size_l1 + s_scan_sum > c_buffsize)
{
// undo the speculative += done by the binarysearch calls above
if (start_col_index_B < stop_col_index_B)
{
// rollback on l2
binarysearch_sub(d_key_merged, d_val_merged, col_Ct, val_Ct, merged_size_l2);
// rollback on l1
binarysearch_sub(s_key_merged_l1, s_val_merged_l1, col_Ct, val_Ct, merged_size_l1);
}
__syncthreads();
// write a signal to some place, not equals -1 means next round is needed
if (local_id == 0)
{
d_queue[queue_id + 2] = merged_size_l2 + merged_size_l1;
d_queue[queue_id + 3] = start_col_index_A;
d_queue[queue_id + 4] = start_col_index_B;
}
// dump l1 to global
readwrite_mergedlist(d_key_merged, d_val_merged, s_key_merged_l1, s_val_merged_l1,
merged_size_l1, merged_size_l2, 1);
__syncthreads();
// merge l2 + l1 on global
// VT is bounded by the reg_key/reg_val size (80) per thread
VT = ceil((merged_size_l2 + merged_size_l1) / local_size_value_type);
diag = VT * local_id;
mp = mergepath_partition(d_key_merged, merged_size_l2,
&d_key_merged[merged_size_l2], merged_size_l1, diag);
mergepath_serialmerge(d_key_merged, d_val_merged,
mp, merged_size_l2, merged_size_l2 + diag - mp, merged_size_l2 + merged_size_l1,
reg_key, reg_val, VT);
__syncthreads();
for (is = 0; is < VT; is++)
{
d_key_merged[diag + is] = reg_key[is];
d_val_merged[diag + is] = reg_val[is];
}
return;
}
// write compact input to free place in merged list
if(is_new_col)
{
#if __CUDA_ARCH__ >= 300
reg_reuse1 = merged_size_l1 + r_scan;
#else
reg_reuse1 = merged_size_l1 + s_scan[local_id];
#endif
s_key_merged_l1[reg_reuse1] = col_Ct;
s_val_merged_l1[reg_reuse1] = val_Ct;
}
__syncthreads();
// merge path partition on l1
VT = ceil((merged_size_l1 + s_scan_sum) / local_size_value_type);
diag = VT * local_id;
mp = mergepath_partition(s_key_merged_l1, merged_size_l1,
&s_key_merged_l1[merged_size_l1], s_scan_sum, diag);
mergepath_serialmerge(s_key_merged_l1, s_val_merged_l1,
mp, merged_size_l1, merged_size_l1 + diag - mp, merged_size_l1 + s_scan_sum,
reg_key, reg_val, VT);
__syncthreads();
for (is = 0; is < VT; is++)
{
s_key_merged_l1[diag + is] = reg_key[is];
s_val_merged_l1[diag + is] = reg_val[is];
}
__syncthreads();
merged_size_l1 += s_scan_sum;
start_col_index_B += local_size;
}
start_col_index_A++;
}
__syncthreads();
if (local_id == 0)
{
d_csrRowPtrC[row_id] = merged_size_l2 + merged_size_l1;
d_queue[queue_id + 2] = -1;
}
// dump l1 to global
readwrite_mergedlist(d_key_merged, d_val_merged, s_key_merged_l1, s_val_merged_l1,
merged_size_l1, merged_size_l2, 1);
__syncthreads();
// merge l2 + l1 on global
VT = ceil((merged_size_l2 + merged_size_l1) / local_size_value_type);
diag = VT * local_id;
mp = mergepath_partition(d_key_merged, merged_size_l2,
&d_key_merged[merged_size_l2], merged_size_l1, diag);
mergepath_serialmerge(d_key_merged, d_val_merged,
mp, merged_size_l2, merged_size_l2 + diag - mp, merged_size_l2 + merged_size_l1,
reg_key, reg_val, VT);
__syncthreads();
for (is = 0; is < VT; is++)
{
d_key_merged[diag + is] = reg_key[is];
d_val_merged[diag + is] = reg_val[is];
}
}
int bhsparse_cuda::compute_nnzC_Ct_mergepath(int num_threads, int num_blocks, int j,
int mergebuffer_size, int position, int *count_next, int mergepath_location)
{
hipError_t err = hipSuccess;
StopWatchInterface *timer = NULL;
if (_profiling)
{
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
}
if (mergepath_location == MERGEPATH_LOCAL)
{
//cout << "doing merge with num_threads = " << num_threads << endl;
switch (mergebuffer_size)
{
case 256:
hipLaunchKernelGGL(( EM_mergepath<value_type, 256, 64>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
case 512:
hipLaunchKernelGGL(( EM_mergepath<value_type, 512, 128>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
case 1024:
hipLaunchKernelGGL(( EM_mergepath<value_type, 1024, 256>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
case 2048:
hipLaunchKernelGGL(( EM_mergepath<value_type, 2048, 256>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
case 2560:
hipLaunchKernelGGL(( EM_mergepath<value_type, 2560, 256>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
}
}
else if (mergepath_location == MERGEPATH_GLOBAL)
{
//cout << "EM_mergepath_global is called." << endl;
hipLaunchKernelGGL(( EM_mergepath_global<value_type, 2560, 256>), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
}
err = hipGetLastError();
if (err != hipSuccess) { cout << "err = " << hipGetErrorString(err) << endl; return -1; }
if (_profiling)
{
err = hipDeviceSynchronize();
sdkStopTimer(&timer);
cout << "[ " << j << " ] EM_mergepath time: " << sdkGetTimerValue(&timer) << " ms" << endl;
}
// load d_queue back, check if there is still any row needs next level merge,
checkCudaErrors(hipMemcpy(&_h_queue_one[TUPLE_QUEUE * position],
&_d_queue_one[TUPLE_QUEUE * position],
TUPLE_QUEUE * num_blocks * sizeof(int), hipMemcpyDeviceToHost));
int temp_queue [6] = {0, 0, 0, 0, 0, 0};
int counter = 0;
int temp_num = 0;
for (int i = position; i < position + num_blocks; i++)
{
// if yes, (1)malloc device mem, (2)upgrade mem address on pos1 and (3)use pos5 as last mem address
if (_h_queue_one[TUPLE_QUEUE * i + 2] != -1)
{
temp_queue[0] = _h_queue_one[TUPLE_QUEUE * i]; // row id
if (mergepath_location == MERGEPATH_LOCAL)
{
int accum = 0;
switch (mergebuffer_size)
{
case 256:
accum = 512;
break;
case 512:
accum = 1024;
break;
case 1024:
accum = 2048;
break;
case 2048:
accum = 2560;
break;
case 2560:
accum = 2560 * 2;
break;
}
temp_queue[1] = _nnzCt + counter * accum; // new start address
}
else if (mergepath_location == MERGEPATH_GLOBAL)
temp_queue[1] = _nnzCt + counter * (mergebuffer_size + 2560); // new start address
//temp_queue[1] = _nnzCt + counter * mergebuffer_size * 2; // new start address
temp_queue[2] = _h_queue_one[TUPLE_QUEUE * i + 2]; // merged size
temp_queue[3] = _h_queue_one[TUPLE_QUEUE * i + 3]; // i
temp_queue[4] = _h_queue_one[TUPLE_QUEUE * i + 4]; // k
temp_queue[5] = _h_queue_one[TUPLE_QUEUE * i + 1]; // old start address
_h_queue_one[TUPLE_QUEUE * i] = _h_queue_one[TUPLE_QUEUE * (position + counter)]; // row id
_h_queue_one[TUPLE_QUEUE * i + 1] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 1]; // new start address
_h_queue_one[TUPLE_QUEUE * i + 2] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 2]; // merged size
_h_queue_one[TUPLE_QUEUE * i + 3] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 3]; // i
_h_queue_one[TUPLE_QUEUE * i + 4] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 4]; // k
_h_queue_one[TUPLE_QUEUE * i + 5] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 5]; // old start address
_h_queue_one[TUPLE_QUEUE * (position + counter)] = temp_queue[0]; // row id
_h_queue_one[TUPLE_QUEUE * (position + counter) + 1] = temp_queue[1]; // new start address
_h_queue_one[TUPLE_QUEUE * (position + counter) + 2] = temp_queue[2]; // merged size
_h_queue_one[TUPLE_QUEUE * (position + counter) + 3] = temp_queue[3]; // i
_h_queue_one[TUPLE_QUEUE * (position + counter) + 4] = temp_queue[4]; // k
_h_queue_one[TUPLE_QUEUE * (position + counter) + 5] = temp_queue[5]; // old start address
counter++;
temp_num += _h_queue_one[TUPLE_QUEUE * i + 2];
}
}
if (counter > 0)
{
//int nnzCt_new = _nnzCt + counter * mergebuffer_size * 2;
int nnzCt_new;
if (mergepath_location == MERGEPATH_LOCAL)
{
int accum = 0;
switch (mergebuffer_size)
{
case 256:
accum = 512;
break;
case 512:
accum = 1024;
break;
case 1024:
accum = 2048;
break;
case 2048:
accum = 2560;
break;
case 2560:
accum = 2560 * 2;
break;
}
nnzCt_new = _nnzCt + counter * accum; //_nnzCt + counter * mergebuffer_size * 2;
}
else if (mergepath_location == MERGEPATH_GLOBAL)
nnzCt_new = _nnzCt + counter * (mergebuffer_size + 2560);
cout << "nnzCt_new = " << nnzCt_new << endl;
// malloc new device memory
index_type *d_csrColIndCt_new;
//checkCudaErrors(hipMalloc((void **)&d_csrColIndCt_new, nnzCt_new * sizeof(index_type)));
err = hipMalloc((void **)&d_csrColIndCt_new, nnzCt_new * sizeof(index_type));
if (err != hipSuccess)
{
//cout << "errb = " << hipGetErrorString(err) << ". malloc extra memory." << endl;
index_type *h_csrColIndCt = (index_type *)malloc(_nnzCt * sizeof(index_type));
// copy last device mem to a temp space on host
checkCudaErrors(hipMemcpy(h_csrColIndCt, _d_csrColIndCt, _nnzCt * sizeof(index_type), hipMemcpyDeviceToHost));
//cout << "err1c = " << hipGetErrorString(err) << ". ." << endl;
//err = hipDeviceSynchronize();
// free last device mem
checkCudaErrors(hipFree(_d_csrColIndCt));
//cout << "err2c = " << hipGetErrorString(err) << ". ." << endl;
//err = hipDeviceSynchronize();
checkCudaErrors(hipMalloc((void **)&d_csrColIndCt_new, nnzCt_new * sizeof(index_type)));
//cout << "err3c = " << hipGetErrorString(err) << ". ." << endl;
// copy data in the temp space on host to device
checkCudaErrors(hipMemcpy(d_csrColIndCt_new, h_csrColIndCt, _nnzCt * sizeof(index_type), hipMemcpyHostToDevice));
//cout << "err4c = " << hipGetErrorString(err) << ". ." << endl;
free(h_csrColIndCt);
}
else
{
checkCudaErrors(hipMemcpy(d_csrColIndCt_new, _d_csrColIndCt, _nnzCt * sizeof(index_type), hipMemcpyDeviceToDevice));
checkCudaErrors(hipFree(_d_csrColIndCt));
}
_d_csrColIndCt = d_csrColIndCt_new;
value_type *d_csrValCt_new;
//checkCudaErrors(hipMalloc((void **)&d_csrValCt_new, nnzCt_new * sizeof(value_type)));
err = hipMalloc((void **)&d_csrValCt_new, nnzCt_new * sizeof(value_type));
if (err != hipSuccess)
{
//cout << "erra = " << hipGetErrorString(err) << ". malloc extra memory." << endl;
value_type *h_csrValCt = (value_type *)malloc(_nnzCt * sizeof(value_type));
// copy last device mem to a temp space on host
checkCudaErrors(hipMemcpy(h_csrValCt, _d_csrValCt, _nnzCt * sizeof(value_type), hipMemcpyDeviceToHost));
//cout << "err1v = " << hipGetErrorString(err) << ". ." << endl;
//err = hipDeviceSynchronize();
// free last device mem
checkCudaErrors(hipFree(_d_csrValCt));
//cout << "err2v = " << hipGetErrorString(err) << ". ." << endl;
//err = hipDeviceSynchronize();
checkCudaErrors(hipMalloc((void **)&d_csrValCt_new, nnzCt_new * sizeof(value_type)));
//cout << "err3v = " << hipGetErrorString(err) << ". ." << endl;
// copy data in the temp space on host to device
checkCudaErrors(hipMemcpy(d_csrValCt_new, h_csrValCt, _nnzCt * sizeof(value_type), hipMemcpyHostToDevice));
//cout << "err4v = " << hipGetErrorString(err) << ". ." << endl;
free(h_csrValCt);
}
else
{
// copy last device mem to current one, device to device copy
checkCudaErrors(hipMemcpy(d_csrValCt_new, _d_csrValCt, _nnzCt * sizeof(value_type), hipMemcpyDeviceToDevice));
// free last device mem
checkCudaErrors(hipFree(_d_csrValCt));
}
_d_csrValCt = d_csrValCt_new;
// rewrite d_queue
checkCudaErrors(hipMemcpy(&_d_queue_one[TUPLE_QUEUE * position],
&_h_queue_one[TUPLE_QUEUE * position],
TUPLE_QUEUE * num_blocks * sizeof(int), hipMemcpyHostToDevice));
//cout << "seems good." << endl;
_nnzCt = nnzCt_new;
}
*count_next = counter;
return BHSPARSE_SUCCESS;
}
int bhsparse_cuda::create_C()
{
    int err = 0;

    // bring the per-row sizes of C back to the host
    checkCudaErrors(hipMemcpy(_h_csrRowPtrC, _d_csrRowPtrC, (_m + 1) * sizeof(index_type), hipMemcpyDeviceToHost));

    // in-place exclusive prefix sum over the row sizes -> CSR row pointers;
    // afterwards _h_csrRowPtrC[_m] holds the total number of nonzeros of C
    int running_sum = 0;
    for (int i = 0; i <= _m; i++)
    {
        int row_size = _h_csrRowPtrC[i];
        _h_csrRowPtrC[i] = running_sum;
        running_sum += row_size;
    }

    _nnzC = _h_csrRowPtrC[_m];

    // allocate and zero the column-index / value arrays of C on the device
    checkCudaErrors(hipMalloc((void **)&_d_csrColIndC, _nnzC * sizeof(index_type)));
    checkCudaErrors(hipMalloc((void **)&_d_csrValC, _nnzC * sizeof(value_type)));
    checkCudaErrors(hipMemset(_d_csrColIndC, 0, _nnzC * sizeof(index_type)));
    checkCudaErrors(hipMemset(_d_csrValC, 0, _nnzC * sizeof(value_type)));

    // push the finished row pointers back to the device
    checkCudaErrors(hipMemcpy(_d_csrRowPtrC, _h_csrRowPtrC, (_m + 1) * sizeof(index_type), hipMemcpyHostToDevice));

    return err;
}
__global__ void
copyCt2C_Single(const int* d_csrRowPtrC,
                int* d_csrColIndC,
                value_type* d_csrValC,
                const int* d_csrRowPtrCt,
                const int* d_csrColIndCt,
                const value_type* d_csrValCt,
                const int* d_queue,
                const int size,
                const int d_queue_offset)
{
    // One thread copies the single nonzero of one queued row from Ct into C.
    // `size` is the number of rows in this bin; `d_queue_offset` is the bin's
    // first entry in the tuple queue.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;

    const int row_id = d_queue[TUPLE_QUEUE * (d_queue_offset + tid)];
    const int src    = d_queue[TUPLE_QUEUE * (d_queue_offset + tid) + 1]; // row start in Ct
    const int dst    = d_csrRowPtrC[row_id];                              // row start in C

    d_csrColIndC[dst] = d_csrColIndCt[src];
    d_csrValC[dst]    = d_csrValCt[src];
}
__global__ void
copyCt2C_Loopless(const int* d_csrRowPtrC,
                  int* d_csrColIndC,
                  value_type* d_csrValC,
                  const int* d_csrRowPtrCt,
                  const int* d_csrColIndCt,
                  const value_type* d_csrValCt,
                  const int* d_queue,
                  const int d_queue_offset)
{
    // One block copies one queued row from Ct into C; the row is short enough
    // that a single pass of the block covers it (hence "loopless").
    const int row_id  = d_queue[TUPLE_QUEUE * (d_queue_offset + blockIdx.x)];
    const int src     = d_queue[TUPLE_QUEUE * (d_queue_offset + blockIdx.x) + 1] + threadIdx.x;
    const int dst     = d_csrRowPtrC[row_id] + threadIdx.x;
    const int row_end = d_csrRowPtrC[row_id + 1];

    if (dst < row_end)
    {
        d_csrColIndC[dst] = d_csrColIndCt[src];
        d_csrValC[dst]    = d_csrValCt[src];
    }
}
__global__ void
copyCt2C_Loop(const int* d_csrRowPtrC,
              int* d_csrColIndC,
              value_type* d_csrValC,
              const int* d_csrRowPtrCt,
              const int* d_csrColIndCt,
              const value_type* d_csrValCt,
              const int* d_queue,
              const int d_queue_offset)
{
    // One block copies one (long) queued row from Ct into C; the block's
    // threads sweep the row in strides of blockDim.x.
    int local_id   = threadIdx.x;
    int group_id   = blockIdx.x;
    int local_size = blockDim.x;

    int row_id        = d_queue[TUPLE_QUEUE * (d_queue_offset + group_id)];
    int Ct_base_start = d_queue[TUPLE_QUEUE * (d_queue_offset + group_id) + 1]; // row start in Ct
    int C_base_start  = d_csrRowPtrC[row_id];                                   // row start in C
    int C_base_stop   = d_csrRowPtrC[row_id + 1];
    int stride        = C_base_stop - C_base_start;                             // row length

    // Integer ceil-division instead of the original ceil((float)a/(float)b):
    // float only represents integers exactly up to 2^24, so the float form
    // can compute a wrong trip count for very long rows.
    int loop = (stride + local_size - 1) / local_size;

    C_base_start  += local_id;
    Ct_base_start += local_id;

    for (int i = 0; i < loop; i++)
    {
        if (C_base_start < C_base_stop)   // tail guard for the last pass
        {
            d_csrColIndC[C_base_start] = d_csrColIndCt[Ct_base_start];
            d_csrValC[C_base_start]    = d_csrValCt[Ct_base_start];
        }
        C_base_start  += local_size;
        Ct_base_start += local_size;
    }
}
// Launch copyCt2C_Single for the bin of rows with exactly one nonzero.
// `local_size` is the number of rows in the bin; `position` is the bin's
// offset in the tuple queue. Returns BHSPARSE_SUCCESS or -1 on CUDA error.
int bhsparse_cuda::copy_Ct_to_C_Single(int num_threads, int num_blocks, int local_size, int position)
{
    hipError_t err = hipSuccess;

    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }

    hipLaunchKernelGGL(( copyCt2C_Single), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_csrRowPtrC, _d_csrColIndC, _d_csrValC,
                                                       _d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt,
                                                       _d_queue_one, local_size, position);
    err = hipGetLastError();
    if (err != hipSuccess)
    { cout << "err = " << hipGetErrorString(err) << endl; return -1; }

    if (_profiling)
    {
        err = hipDeviceSynchronize();
        if (err != hipSuccess)
        { cout << "err = " << hipGetErrorString(err) << endl; return -1; }
        sdkStopTimer(&timer);
        //cout << "copyCt2C_Single time = " << sdkGetTimerValue(&timer) << " ms" << endl;
        sdkDeleteTimer(&timer); // release the timer; the original leaked it on every call
    }

    return BHSPARSE_SUCCESS;
}
// Launch copyCt2C_Loopless for bin `j` (rows short enough for one block
// pass). `position` is the bin's offset in the tuple queue.
// Returns BHSPARSE_SUCCESS or -1 on CUDA error.
int bhsparse_cuda::copy_Ct_to_C_Loopless(int num_threads, int num_blocks, int j, int position)
{
    hipError_t err = hipSuccess;

    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }

    hipLaunchKernelGGL(( copyCt2C_Loopless), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_csrRowPtrC, _d_csrColIndC, _d_csrValC,
                                                         _d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt,
                                                         _d_queue_one, position);
    err = hipGetLastError();
    if (err != hipSuccess)
    { cout << "err = " << hipGetErrorString(err) << endl; return -1; }

    if (_profiling)
    {
        err = hipDeviceSynchronize();
        if (err != hipSuccess)
        { cout << "err = " << hipGetErrorString(err) << endl; return -1; }
        sdkStopTimer(&timer);
        //cout << "copyCt2C_Loopless[ " << j << " ] time = " << sdkGetTimerValue(&timer) << " ms" << endl;
        sdkDeleteTimer(&timer); // release the timer; the original leaked it on every call
    }

    return BHSPARSE_SUCCESS;
}
// Launch copyCt2C_Loop for bin `j` (long rows, block strides over the row).
// `position` is the bin's offset in the tuple queue.
// Returns BHSPARSE_SUCCESS or -1 on CUDA error.
int bhsparse_cuda::copy_Ct_to_C_Loop(int num_threads, int num_blocks, int j, int position)
{
    hipError_t err = hipSuccess;

    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }

    hipLaunchKernelGGL(( copyCt2C_Loop), dim3(num_blocks), dim3(num_threads) , 0, 0, _d_csrRowPtrC, _d_csrColIndC, _d_csrValC,
                                                     _d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt,
                                                     _d_queue_one, position);
    err = hipGetLastError();
    if (err != hipSuccess)
    { cout << "err = " << hipGetErrorString(err) << endl; return -1; }

    if (_profiling)
    {
        err = hipDeviceSynchronize();
        if (err != hipSuccess)
        { cout << "err = " << hipGetErrorString(err) << endl; return -1; }
        sdkStopTimer(&timer);
        //cout << "copyCt2C_Loop[ " << j << " ] time = " << sdkGetTimerValue(&timer) << " ms" << endl;
        sdkDeleteTimer(&timer); // release the timer; the original leaked it on every call
    }

    return BHSPARSE_SUCCESS;
}
// Number of nonzeros in the result matrix C; valid after create_C() has run.
int bhsparse_cuda::get_nnzC()
{
    return _nnzC;
}
// Copy the finished matrix C back to the host: column indices and values go
// into the caller-supplied buffers; row pointers go into _h_csrRowPtrC
// (the host array registered in initData).
int bhsparse_cuda::get_C(index_type *csrColIndC, value_type *csrValC)
{
    int err = 0;
    checkCudaErrors(hipMemcpy(csrColIndC, _d_csrColIndC, _nnzC * sizeof(index_type), hipMemcpyDeviceToHost));
    checkCudaErrors(hipMemcpy(_h_csrRowPtrC, _d_csrRowPtrC, (_m + 1) * sizeof(index_type), hipMemcpyDeviceToHost));
    checkCudaErrors(hipMemcpy(csrValC, _d_csrValC, _nnzC * sizeof(value_type), hipMemcpyDeviceToHost));
    return err;
}
| 86b18b47efe6eaeebef5527e0a6f942a4eeca3a8.cu | //////////////////////////////////////////////////////////////////////////
// < A CUDA/OpenCL General Sparse Matrix-Matrix Multiplication Program >
//
// < See paper:
// Weifeng Liu and Brian Vinter, "An Efficient GPU General Sparse
// Matrix-Matrix Multiplication for Irregular Data," Parallel and
// Distributed Processing Symposium, 2014 IEEE 28th International
// (IPDPS '14), pp.370-381, 19-23 May 2014
// for details. >
//////////////////////////////////////////////////////////////////////////
#include "bhsparse_cuda.h"
// Default constructor: intentionally empty — all initialization is done in
// initPlatform() and initData().
bhsparse_cuda::bhsparse_cuda()
{
}
// Select CUDA device 0, query its properties, and cache the SM count and
// the per-SM block budget used for later launch sizing. Returns 0.
int bhsparse_cuda::initPlatform()
{
    _profiling = false;

    int device_id = 0;
    // the original left these two CUDA calls unchecked; wrap them in
    // checkCudaErrors like every other runtime call in this file
    checkCudaErrors(cudaSetDevice(device_id));

    cudaDeviceProp deviceProp;
    checkCudaErrors(cudaGetDeviceProperties(&deviceProp, device_id));

    _num_smxs = deviceProp.multiProcessorCount;
    _max_blocks_per_smx = deviceProp.maxThreadsPerMultiProcessor / WARPSIZE_NV;

    cout << "Device [" <<  device_id << "] " << deviceProp.name
         << " @ " << deviceProp.clockRate * 1e-3f << "MHz. "
         << _num_smxs << " SMXs." << endl;

    return 0;
}
// Release platform-level resources. Nothing to do for CUDA (the device
// context is implicit); always returns 0.
int bhsparse_cuda::freePlatform()
{
    int err = 0;
    return err;
}
// Release every device-side buffer (A, B, C, Ct, and the work queue).
// NOTE(review): the cudaFree return codes are not checked and the pointers
// are not reset to NULL afterwards, so calling free_mem() twice would free
// stale pointers — confirm callers invoke this exactly once after all
// buffers have been allocated.
int bhsparse_cuda::free_mem()
{
    int err = 0;
    // A
    cudaFree(_d_csrValA);
    cudaFree(_d_csrRowPtrA);
    cudaFree(_d_csrColIndA);
    // B
    cudaFree(_d_csrValB);
    cudaFree(_d_csrRowPtrB);
    cudaFree(_d_csrColIndB);
    // C
    cudaFree(_d_csrValC);
    cudaFree(_d_csrRowPtrC);
    cudaFree(_d_csrColIndC);
    // Ct
    cudaFree(_d_csrValCt);
    cudaFree(_d_csrRowPtrCt);
    cudaFree(_d_csrColIndCt);
    // QUEUE_ONEs
    cudaFree(_d_queue_one);
    return err;
}
// Upload the input matrices A (m x k) and B (k x n) to the device and set up
// the device-side buffers for the result C (m x n), the intermediate Ct, and
// the tuple work queue.
//
// csrRowPtrC, csrRowPtrCt and queue_one are caller-owned host arrays; they
// are kept as the _h_* aliases used by the later pipeline stages.
int bhsparse_cuda::initData(int m, int k, int n,
                            int nnzA, value_type *csrValA, index_type *csrRowPtrA, index_type *csrColIndA,
                            int nnzB, value_type *csrValB, index_type *csrRowPtrB, index_type *csrColIndB,
                            index_type *csrRowPtrC, index_type *csrRowPtrCt, index_type *queue_one)
{
    int err = 0;
    // cache the problem dimensions
    _m = m;
    _k = k;
    _n = n;
    _nnzA = nnzA;
    _nnzB = nnzB;
    _nnzC = 0;   // computed later by create_C()
    _nnzCt = 0;  // computed later by create_Ct()
    // malloc mem space and copy data from host to device
    // Matrix A
    checkCudaErrors(cudaMalloc((void **)&_d_csrColIndA, _nnzA * sizeof(index_type)));
    checkCudaErrors(cudaMalloc((void **)&_d_csrRowPtrA, (_m+1) * sizeof(index_type)));
    checkCudaErrors(cudaMalloc((void **)&_d_csrValA, _nnzA * sizeof(value_type)));
    checkCudaErrors(cudaMemcpy(_d_csrColIndA, csrColIndA, _nnzA * sizeof(index_type), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(_d_csrRowPtrA, csrRowPtrA, (_m+1) * sizeof(index_type), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(_d_csrValA, csrValA, _nnzA * sizeof(value_type), cudaMemcpyHostToDevice));
    // Matrix B (note: row pointer has _k+1 entries, B has k rows)
    checkCudaErrors(cudaMalloc((void **)&_d_csrColIndB, _nnzB * sizeof(index_type)));
    checkCudaErrors(cudaMalloc((void **)&_d_csrRowPtrB, (_k+1) * sizeof(index_type)));
    checkCudaErrors(cudaMalloc((void **)&_d_csrValB, _nnzB * sizeof(value_type)));
    checkCudaErrors(cudaMemcpy(_d_csrColIndB, csrColIndB, _nnzB * sizeof(index_type), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(_d_csrRowPtrB, csrRowPtrB, (_k+1) * sizeof(index_type), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(_d_csrValB, csrValB, _nnzB * sizeof(value_type), cudaMemcpyHostToDevice));
    // Matrix C: only the row pointer is allocated here; col/val are allocated
    // in create_C() once _nnzC is known
    _h_csrRowPtrC = csrRowPtrC;
    checkCudaErrors(cudaMalloc((void **)&_d_csrRowPtrC, (_m+1) * sizeof(index_type)));
    checkCudaErrors(cudaMemset(_d_csrRowPtrC, 0, (_m+1) * sizeof(index_type)));
    // Matrix Ct: same — col/val are allocated in create_Ct()
    _h_csrRowPtrCt = csrRowPtrCt;
    checkCudaErrors(cudaMalloc((void **)&_d_csrRowPtrCt, (_m+1) * sizeof(index_type)));
    checkCudaErrors(cudaMemset(_d_csrRowPtrCt, 0, (_m+1) * sizeof(index_type)));
    // statistics - queue_one (TUPLE_QUEUE ints per row)
    _h_queue_one = queue_one;
    checkCudaErrors(cudaMalloc((void **)&_d_queue_one, TUPLE_QUEUE * _m * sizeof(index_type)));
    checkCudaErrors(cudaMemset(_d_queue_one, 0, TUPLE_QUEUE * _m * sizeof(index_type)));
    return err;
}
// Enable/disable per-stage timing. When on, the launch wrappers synchronize
// after each kernel and print elapsed times.
void bhsparse_cuda::setProfiling(bool profiling)
{
    _profiling = profiling;
}
// One thread per row of A: the upper bound on the size of row i of C is the
// sum of the lengths of every row of B referenced by row i of A.
// Thread 0 additionally clears the trailing entry used by the later scan.
__global__ void
compute_nnzCt_cudakernel(const int* d_csrRowPtrA,
                         const int* __restrict__ d_csrColIndA,
                         const int* d_csrRowPtrB,
                         int *d_csrRowPtrCt,
                         const int m)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;

    if (row < m)
    {
        int upper_bound = 0;
        const int row_start = d_csrRowPtrA[row];
        const int row_end   = d_csrRowPtrA[row + 1];

        for (int j = row_start; j < row_end; j++)
        {
            const int b_row = d_csrColIndA[j];
            upper_bound += d_csrRowPtrB[b_row + 1] - d_csrRowPtrB[b_row];
        }

        d_csrRowPtrCt[row] = upper_bound;
    }

    if (row == 0)
        d_csrRowPtrCt[m] = 0;
}
// Warm up the device by running the nnzCt kernel once; the result is
// discarded (compute_nnzCt() later recomputes and copies it back).
// Returns BHSPARSE_SUCCESS or -1 on CUDA error.
int bhsparse_cuda::warmup()
{
    const int num_threads = GROUPSIZE_256;
    const int num_blocks  = ceil((double)_m / (double)num_threads);

    compute_nnzCt_cudakernel<<< num_blocks, num_threads >>>(_d_csrRowPtrA,
                                                            _d_csrColIndA,
                                                            _d_csrRowPtrB,
                                                            _d_csrRowPtrCt,
                                                            _m);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        cout << "err = " << cudaGetErrorString(err) << endl;
        return -1;
    }

    return BHSPARSE_SUCCESS;
}
// Block the host until all previously launched device work has finished.
// Returns the cudaError_t from the sync as an int (cudaSuccess == 0).
int bhsparse_cuda::kernel_barrier()
{
    return cudaDeviceSynchronize();
}
// Compute, per row of C, an upper bound on its number of nonzeros (the
// size of the corresponding row of Ct) and copy the bounds back to the
// host array for binning. Returns BHSPARSE_SUCCESS or -1 on CUDA error.
int bhsparse_cuda::compute_nnzCt()
{
    const int num_threads = GROUPSIZE_256;
    const int num_blocks  = ceil((double)_m / (double)num_threads);

    compute_nnzCt_cudakernel<<< num_blocks, num_threads >>>(_d_csrRowPtrA,
                                                            _d_csrColIndA,
                                                            _d_csrRowPtrB,
                                                            _d_csrRowPtrCt,
                                                            _m);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        cout << "err = " << cudaGetErrorString(err) << endl;
        return -1;
    }

    checkCudaErrors(cudaMemcpy(_h_csrRowPtrCt, _d_csrRowPtrCt, (_m + 1) * sizeof(index_type), cudaMemcpyDeviceToHost));

    return BHSPARSE_SUCCESS;
}
// Allocate and zero the device-side column-index / value arrays of the
// intermediate matrix Ct (nnzCt entries) and upload the host-side tuple
// queue prepared by the binning stage.
int bhsparse_cuda::create_Ct(int nnzCt)
{
    int err = 0;
    checkCudaErrors(cudaMemcpy(_d_queue_one, _h_queue_one, TUPLE_QUEUE * _m * sizeof(index_type), cudaMemcpyHostToDevice));
    _nnzCt = nnzCt;
    // create device mem of Ct
    checkCudaErrors(cudaMalloc((void **)&_d_csrColIndCt, _nnzCt * sizeof(index_type)));
    checkCudaErrors(cudaMalloc((void **)&_d_csrValCt, _nnzCt * sizeof(value_type)));
    checkCudaErrors(cudaMemset(_d_csrColIndCt, 0, _nnzCt * sizeof(index_type)));
    checkCudaErrors(cudaMemset(_d_csrValCt, 0, _nnzCt * sizeof(value_type)));
    return err;
}
//__inline__ __device__ void
//siftDown(int *s_key,
// value_type *s_val,
// int start,
// int stop)
//{
// int root = start;
// int child, swap;
// int temp_swap_key;
// value_type temp_swap_val;
// while (root * 2 + 1 <= stop)
// {
// child = root * 2 + 1;
// swap = root;
// if (s_key[swap] < s_key[child])
// swap = child;
// if (child + 1 <= stop && s_key[swap] < s_key[child + 1])
// swap = child + 1;
// if (swap != root)
// {
// //swap root and swap
// temp_swap_key = s_key[root];
// s_key[root] = s_key[swap];
// s_key[swap] = temp_swap_key;
// temp_swap_val = s_val[root];
// s_val[root] = s_val[swap];
// s_val[swap] = temp_swap_val;
// root = swap;
// }
// else
// return;
// }
//}
//__inline__ __device__ int
//heapsort(int *s_key,
// value_type *s_val,
// int segment_size)
//{
// // heapsort - heapify max-heap
// int start = (segment_size - 1) / 2;
// int stop = segment_size - 1;
// while (start >= 0)
// {
// siftDown(s_key, s_val, start, stop);
// start--;
// }
// // inject root element to the end
// int temp_swap_key;
// value_type temp_swap_val;
// temp_swap_key = s_key[0];
// s_key[0] = s_key[stop];
// s_key[stop] = temp_swap_key;
// temp_swap_val = s_val[0];
// s_val[0] = s_val[stop];
// s_val[stop] = temp_swap_val;
// stop--;
// siftDown(s_key, s_val, 0, stop);
// // this start is compressed list's start
// start = segment_size - 1;
// // heapsort - remove-max and compress
// while (stop >= 0)
// {
// if (s_key[0] == s_key[start])
// {
// s_val[start] += s_val[0];
// s_key[0] = s_key[stop];
// s_val[0] = s_val[stop];
// }
// else
// {
// start--;
// if (stop == start)
// {
// temp_swap_key = s_key[0];
// s_key[0] = s_key[stop];
// s_key[stop] = temp_swap_key;
// temp_swap_val = s_val[0];
// s_val[0] = s_val[stop];
// s_val[stop] = temp_swap_val;
// }
// else
// {
// s_key[start] = s_key[0];
// s_val[start] = s_val[0];
// s_key[0] = s_key[stop];
// s_val[0] = s_val[stop];
// }
// }
// stop--;
// siftDown(s_key, s_val, 0, stop);
// }
// return start;
//}
//template<typename vT, int c_segmentsize>
//__global__ void
//ESC_2heap_noncoalesced(const int* d_queue,
// const int* d_csrRowPtrA,
// const int* __restrict__ d_csrColIndA,
// const vT* __restrict__ d_csrValA,
// const int* d_csrRowPtrB,
// const int* __restrict__ d_csrColIndB,
// const vT* __restrict__ d_csrValB,
// int* d_csrRowPtrC,
// const int* d_csrRowPtrCt,
// int* d_csrColIndCt,
// vT* d_csrValCt,
// const int queue_size,
// const int d_queue_offset)
//{
// __shared__ int s_key[c_segmentsize * WARPSIZE_NV_2HEAP];
// __shared__ vT s_val[c_segmentsize * WARPSIZE_NV_2HEAP];
// __shared__ char s_start[WARPSIZE_NV_2HEAP];
// __shared__ char s_count[WARPSIZE_NV_2HEAP];
// int local_id = threadIdx.x;
// int group_id = blockIdx.x;
// int global_id = blockIdx.x * blockDim.x + threadIdx.x;
// int num_groups = gridDim.x;
// int local_size = blockDim.x;
// int *s_key_local = &s_key[local_id * c_segmentsize];
// vT *s_val_local = &s_val[local_id * c_segmentsize];
// if (global_id < queue_size)
// {
// int i, counter = 0;
// int rowidC = d_queue[TUPLE_QUEUE * (d_queue_offset + global_id)];
// int start_col_index_A, stop_col_index_A;
// int rowidB, start_col_index_B, stop_col_index_B;
// vT value_A;
// start_col_index_A = d_csrRowPtrA[rowidC];
// stop_col_index_A = d_csrRowPtrA[rowidC + 1];
// // i is both col index of A and row index of B
// for (i = start_col_index_A; i < stop_col_index_A; i++)
// {
// rowidB = d_csrColIndA[i];
// value_A = d_csrValA[i];
// start_col_index_B = d_csrRowPtrB[rowidB];
// stop_col_index_B = d_csrRowPtrB[rowidB + 1];
// for (int j = start_col_index_B; j < stop_col_index_B; j++)
// {
// s_key_local[counter] = d_csrColIndB[j];
// s_val_local[counter] = d_csrValB[j] * value_A;
// counter++;
// }
// }
// // heapsort in each work-item
// int local_start = heapsort(s_key_local, s_val_local, counter);
// counter -= local_start;
// s_start[local_id] = local_start;
// s_count[local_id] = counter;
// d_csrRowPtrC[rowidC] = counter;
// }
// __syncthreads();
// // write compressed lists to global mem
// int base_index;
// int local_offset;
// int loop = group_id != num_groups - 1 ? local_size : queue_size - group_id * local_size;
// for (int i = 0; i < loop; i++)
// {
// base_index = d_queue[TUPLE_QUEUE * (d_queue_offset + group_id * local_size + i) + 1] + local_id;
// local_offset = c_segmentsize * i + local_id + s_start[i];
// if (local_id < s_count[i])
// {
// d_csrColIndCt[base_index] = s_key[local_offset];
// d_csrValCt[base_index] = s_val[local_offset];
// }
// if (local_id + local_size < s_count[i])
// {
// base_index += local_size;
// local_offset += local_size;
// d_csrColIndCt[base_index] = s_key[local_offset];
// d_csrValCt[base_index] = s_val[local_offset];
// }
// }
//}
// Binary-heap sift-down on one thread's segment, restoring the max-heap
// property between heap indices `start` and `stop` (inclusive).
//
// Layout: element i of this thread's segment lives at
// s_*[i * local_size + local_id] — the segments of all threads in the block
// are interleaved with stride local_size (= blockDim.x), so threads touching
// the same heap level access consecutive shared-memory words.
__inline__ __device__ void
siftDown(int *s_key,
         value_type *s_val,
         const int start,
         const int stop,
         const int local_id,
         const int local_size)
{
    int root = start;
    int child, swap;
    int temp_swap_key;
    value_type temp_swap_val;
    while (root * 2 + 1 <= stop)
    {
        child = root * 2 + 1;   // left child
        swap = root;
        // pick the largest of root, left child, right child
        if (s_key[swap * local_size + local_id] < s_key[child * local_size + local_id])
            swap = child;
        if (child + 1 <= stop && s_key[swap * local_size + local_id] < s_key[(child + 1) * local_size + local_id])
            swap = child + 1;
        if (swap != root)
        {
            const int index1 = root * local_size + local_id;
            const int index2 = swap * local_size + local_id;
            //swap root and swap
            temp_swap_key = s_key[index1];
            s_key[index1] = s_key[index2];
            s_key[index2] = temp_swap_key;
            temp_swap_val = s_val[index1];
            s_val[index1] = s_val[index2];
            s_val[index2] = temp_swap_val;
            root = swap;   // continue sifting from the swapped position
        }
        else
            return;        // heap property restored
    }
}
// Heapsort + duplicate-merge ("compress") on one thread's segment of
// (key, value) pairs. Duplicate keys have their values added together.
//
// Layout: element i of the segment is stored at s_*[i * local_size +
// local_id] (segments of the block's threads are interleaved, see siftDown).
//
// Returns the segment-local start index of the compressed result; entries
// [start, segment_size) then hold the distinct keys in sorted order with
// their summed values.
__inline__ __device__ int
heapsort(int *s_key,
         value_type *s_val,
         const int segment_size,
         const int local_id,
         const int local_size)
{
    // heapsort - heapify max-heap
    int start = (segment_size - 1) / 2;
    int stop = segment_size - 1;
    int index1, index2;
    while (start >= 0)
    {
        siftDown(s_key, s_val, start, stop, local_id, local_size);
        start--;
    }
    // inject root element to the end
    int temp_swap_key;
    value_type temp_swap_val;
    index1 = stop * local_size + local_id;
    temp_swap_key = s_key[local_id];
    s_key[local_id] = s_key[index1];
    s_key[index1] = temp_swap_key;
    temp_swap_val = s_val[local_id];
    s_val[local_id] = s_val[index1];
    s_val[index1] = temp_swap_val;
    stop--;
    siftDown(s_key, s_val, 0, stop, local_id, local_size);
    // this start is compressed list's start
    start = segment_size - 1;
    // heapsort - remove-max and compress
    while (stop >= 0)
    {
        index2 = stop * local_size + local_id;
        // current max (at the root, heap index 0) equals the last emitted key:
        // merge by accumulating its value instead of emitting a new entry
        if (s_key[local_id] == s_key[start * local_size + local_id])
        {
            s_val[start * local_size + local_id] += s_val[local_id];
            s_key[local_id] = s_key[index2];
            s_val[local_id] = s_val[index2];
        }
        else
        {
            // new distinct key: grow the compressed list downwards
            start--;
            index1 = start * local_size + local_id;
            if (stop == start)
            {
                // compressed region reached the heap boundary: plain swap
                temp_swap_key = s_key[local_id];
                s_key[local_id] = s_key[index2];
                s_key[index2] = temp_swap_key;
                temp_swap_val = s_val[local_id];
                s_val[local_id] = s_val[index2];
                s_val[index2] = temp_swap_val;
            }
            else
            {
                // emit root into the compressed slot, refill root from heap end
                s_key[index1] = s_key[local_id];
                s_val[index1] = s_val[local_id];
                s_key[local_id] = s_key[index2];
                s_val[local_id] = s_val[index2];
            }
        }
        stop--;
        siftDown(s_key, s_val, 0, stop, local_id, local_size);
    }
    return start;
}
// ESC (Expansion, Sorting, Compression) kernel for short rows of C.
// Each thread handles one queued row: it expands all candidate partial
// products of that row into its shared-memory segment, sorts and merges
// duplicates with a per-thread heapsort, then writes the compressed row to
// the intermediate matrix Ct and its final size to d_csrRowPtrC.
//
// c_segmentsize is the compile-time upper bound on the number of candidate
// nonzeros per row (selected by the host-side switch on bin id j).
// Segments of the block's threads are interleaved with stride blockDim.x.
// NOTE(review): shared usage assumes blockDim.x <= WARPSIZE_NV_2HEAP —
// confirm against the launch configuration.
template<typename vT, int c_segmentsize>
__global__ void
ESC_2heap_noncoalesced(const int* d_queue,
                       const int* d_csrRowPtrA,
                       const int* __restrict__ d_csrColIndA,
                       const vT* __restrict__ d_csrValA,
                       const int* d_csrRowPtrB,
                       const int* __restrict__ d_csrColIndB,
                       const vT* __restrict__ d_csrValB,
                       int* d_csrRowPtrC,
                       const int* d_csrRowPtrCt,
                       int* d_csrColIndCt,
                       vT* d_csrValCt,
                       const int queue_size,
                       const int d_queue_offset)
{
    __shared__ int s_key[c_segmentsize * WARPSIZE_NV_2HEAP];
    __shared__ vT s_val[c_segmentsize * WARPSIZE_NV_2HEAP];
    const int local_id = threadIdx.x;
    const int group_id = blockIdx.x;
    const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
    const int local_size = blockDim.x;
    int index = 0;
    if (global_id < queue_size)
    {
        int i, counter = 0;
        int start_col_index_A, stop_col_index_A;
        int rowidB, start_col_index_B, stop_col_index_B;
        vT value_A;
        // row of C this thread is responsible for
        int rowidC = d_queue[TUPLE_QUEUE * (d_queue_offset + global_id)];
        start_col_index_A = d_csrRowPtrA[rowidC];
        stop_col_index_A = d_csrRowPtrA[rowidC + 1];
        // i is both col index of A and row index of B
        for (i = start_col_index_A; i < stop_col_index_A; i++)
        {
            rowidB = d_csrColIndA[i];
            value_A = d_csrValA[i];
            start_col_index_B = d_csrRowPtrB[rowidB];
            stop_col_index_B = d_csrRowPtrB[rowidB + 1];
            // expansion: one candidate (col, val) per nonzero of B's row
            for (int j = start_col_index_B; j < stop_col_index_B; j++)
            {
                index = counter * local_size + local_id;  // interleaved segment slot
                s_key[index] = d_csrColIndB[j];
                s_val[index] = d_csrValB[j] * value_A;
                counter++;
            }
        }
        // heapsort in each work-item: sort by column, merge duplicates
        int local_start = heapsort(s_key, s_val, counter, local_id, local_size);
        counter -= local_start;   // number of distinct columns in this row
        d_csrRowPtrC[rowidC] = counter;
        // write the compressed row to Ct at the offset recorded in the queue
        int base_index = d_queue[TUPLE_QUEUE * (d_queue_offset + group_id * local_size + local_id) + 1];;
        for (int i = 0; i < counter; i++)
        {
            d_csrColIndCt[base_index + i] = s_key[(local_start+i) * local_size + local_id];
            d_csrValCt[base_index + i] = s_val[(local_start+i) * local_size + local_id];
        }
    }
}
// Launch the ESC_2heap kernel for bin `j`, instantiating the template with
// c_segmentsize == j (valid bins: 2..32; other values launch nothing, as in
// the original switch). `counter` is the number of rows in the bin and
// `position` the bin's offset in the tuple queue.
// Returns BHSPARSE_SUCCESS or -1 on CUDA error.
int bhsparse_cuda::compute_nnzC_Ct_2heap_noncoalesced(int num_threads, int num_blocks, int j, int counter, int position)
{
    cudaError_t err = cudaSuccess;

    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }

    // The 31 hand-written switch cases of the original are collapsed into a
    // macro; the set of template instantiations and the launch arguments are
    // unchanged.
#define BHSPARSE_ESC_2HEAP_CASE(SEG)                                                   \
    case SEG:                                                                          \
        ESC_2heap_noncoalesced<value_type, SEG><<< num_blocks, num_threads >>>(        \
                _d_queue_one,                                                          \
                _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,                              \
                _d_csrRowPtrB, _d_csrColIndB, _d_csrValB,                              \
                _d_csrRowPtrC, _d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt,            \
                counter, position);                                                    \
        break;

    switch (j)
    {
    BHSPARSE_ESC_2HEAP_CASE(2)
    BHSPARSE_ESC_2HEAP_CASE(3)
    BHSPARSE_ESC_2HEAP_CASE(4)
    BHSPARSE_ESC_2HEAP_CASE(5)
    BHSPARSE_ESC_2HEAP_CASE(6)
    BHSPARSE_ESC_2HEAP_CASE(7)
    BHSPARSE_ESC_2HEAP_CASE(8)
    BHSPARSE_ESC_2HEAP_CASE(9)
    BHSPARSE_ESC_2HEAP_CASE(10)
    BHSPARSE_ESC_2HEAP_CASE(11)
    BHSPARSE_ESC_2HEAP_CASE(12)
    BHSPARSE_ESC_2HEAP_CASE(13)
    BHSPARSE_ESC_2HEAP_CASE(14)
    BHSPARSE_ESC_2HEAP_CASE(15)
    BHSPARSE_ESC_2HEAP_CASE(16)
    BHSPARSE_ESC_2HEAP_CASE(17)
    BHSPARSE_ESC_2HEAP_CASE(18)
    BHSPARSE_ESC_2HEAP_CASE(19)
    BHSPARSE_ESC_2HEAP_CASE(20)
    BHSPARSE_ESC_2HEAP_CASE(21)
    BHSPARSE_ESC_2HEAP_CASE(22)
    BHSPARSE_ESC_2HEAP_CASE(23)
    BHSPARSE_ESC_2HEAP_CASE(24)
    BHSPARSE_ESC_2HEAP_CASE(25)
    BHSPARSE_ESC_2HEAP_CASE(26)
    BHSPARSE_ESC_2HEAP_CASE(27)
    BHSPARSE_ESC_2HEAP_CASE(28)
    BHSPARSE_ESC_2HEAP_CASE(29)
    BHSPARSE_ESC_2HEAP_CASE(30)
    BHSPARSE_ESC_2HEAP_CASE(31)
    BHSPARSE_ESC_2HEAP_CASE(32)
    }
#undef BHSPARSE_ESC_2HEAP_CASE

    err = cudaGetLastError();
    if (err != cudaSuccess)
    { cout << "err = " << cudaGetErrorString(err) << endl; return -1; }

    if (_profiling)
    {
        err = cudaDeviceSynchronize();
        if (err != cudaSuccess)
        { cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
        sdkStopTimer(&timer);
        cout << "[ " << j << " ] ESC_2heap time: " << sdkGetTimerValue(&timer) << " ms" << endl;
        sdkDeleteTimer(&timer); // release the timer; the original leaked it on every call
    }

    return BHSPARSE_SUCCESS;
}
// Compare-and-exchange for the odd-even merge network: swap the (key, value)
// pairs A and B when their key ordering disagrees with direction `dir`
// (dir == 1 keeps keyA <= keyB).
__inline__ __device__
void coex(int *keyA,
          value_type *valA,
          int *keyB,
          value_type *valB,
          const int dir)
{
    const bool out_of_order = ((*keyA > *keyB) == dir);
    if (out_of_order)
    {
        const int        k = *keyA;
        const value_type v = *valA;
        *keyA = *keyB;
        *valA = *valB;
        *keyB = k;
        *valB = v;
    }
}
// In-place parallel odd-even/bitonic-style merge sort of the key/value arrays
// in shared memory, ascending by key. Each thread handles two elements, so the
// block supplies arrayLength/2 threads (callers pass width = 2 * blockDim.x).
// Assumes arrayLength is a power of two — TODO confirm; the caller pads the
// tail with sentinel keys to reach the full width.
// Must be reached by ALL threads of the block (contains __syncthreads()).
__inline__ __device__
void oddeven(int *s_key,
value_type *s_val,
int arrayLength)
{
int dir = 1;
for (int size = 2; size <= arrayLength; size <<= 1)
{
int stride = size >> 1;
int offset = threadIdx.x & (stride - 1);
{
// First step of this merge stage: compare elements 'stride' apart.
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
coex(&s_key[pos], &s_val[pos], &s_key[pos + stride], &s_val[pos + stride], dir);
stride >>= 1;
}
// Remaining steps with successively halved strides; only threads whose
// offset within the stage reaches the stride participate.
for (; stride > 0; stride >>= 1)
{
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
if (offset >= stride)
coex(&s_key[pos - stride], &s_val[pos - stride], &s_key[pos], &s_val[pos], dir);
}
}
}
// Inclusive prefix sum of one value per lane across a single warp using
// shuffle-up; 'local_id' is the lane index. Returns this lane's inclusive sum.
// All 32 lanes of the calling warp must execute this function (call sites run
// it either warp-uniformly or under an `if (!seg_id)` guard that selects the
// entire first warp), so the full 0xffffffff participation mask is correct.
// Uses the synchronizing *_sync variant (CUDA 9+): the legacy mask-less
// __shfl_up is deprecated and unavailable on Volta (sm_70) and newer targets.
template<typename T>
__inline__ __device__
T scan_32_shfl(T x, const int local_id)
{
#pragma unroll
for( int offset = 1 ; offset < WARPSIZE_NV ; offset <<= 1 )
{
// Pull the partial sum from 'offset' lanes below and accumulate it.
T y = __shfl_up_sync(0xffffffff, x, offset);
if(local_id >= offset)
x += y;
}
return x;
}
// Serial in-place exclusive prefix sum over s_scan[0..l-1], performed by the
// thread with local_id == 0 only; every other thread returns immediately.
// After the call, s_scan[i] holds the sum of the original s_scan[0..i-1].
template<typename T>
__inline__ __device__
void scan_single( volatile T *s_scan,
const int local_id,
const int l)
{
if (local_id)
return;
// Running-accumulator form: 'current' carries the element about to be
// overwritten, 'running' the exclusive prefix written in its place.
T running = 0;
T current = s_scan[0];
s_scan[0] = 0;
for (int i = 1; i < l; i++)
{
running += current;
current = s_scan[i];
s_scan[i] = running;
}
}
// Block-wide exclusive "plus-one" scan: returns each thread's exclusive prefix
// of r_in and additionally leaves the running totals in s_scan, with the block
// total ending up in s_scan[seg_num]. seg_num = number of warps taking part
// (callers pass blockDim.x / WARPSIZE_NV); s_scan needs seg_num + 1 entries.
// Must be executed by all threads of the block (contains __syncthreads()).
// Uses *_sync shuffles (CUDA 9+); the legacy mask-less __shfl_up is removed
// on Volta+ targets.
template<typename T>
__inline__ __device__
T scan_plus1_shfl(volatile T *s_scan,
const int local_id,
T r_in,
const int seg_num)
{
// 3-stage method: per-warp scan, scan of warp totals, propagate.
const int lane_id = local_id % WARPSIZE_NV;
const int seg_id = local_id / WARPSIZE_NV;
// stage 1. each warp scans its own values
T r_scan = 0;
r_scan = scan_32_shfl<T>(r_in, lane_id);
if (lane_id == WARPSIZE_NV - 1)
s_scan[seg_id] = r_scan; // publish this warp's total
// inclusive -> exclusive within the warp (full-warp mask; all lanes active)
r_scan = __shfl_up_sync(0xffffffff, r_scan, 1);
r_scan = lane_id ? r_scan : 0;
__syncthreads();
// stage 2. the first warp scans the per-warp totals
r_in = (local_id < seg_num) ? s_scan[local_id] : 0;
if (!seg_id)
r_in = scan_32_shfl<T>(r_in, lane_id);
if (local_id < seg_num)
s_scan[local_id + 1] = r_in; // shifted by one: s_scan[seg_num] = block total
__syncthreads();
// stage 3. propagate (element-wise add) each warp's base offset
if (seg_id)
r_scan += s_scan[seg_id];
return r_scan;
}
// Block-wide exclusive "plus-one" scan of TWO inputs per thread: r_in covers
// logical elements [0, blockDim.x) and r_in_halfwidth covers
// [blockDim.x, 2*blockDim.x). Results are written to s_scan[0..2*blockDim.x-1]
// with the grand total in s_scan[2*blockDim.x]. s_scan_shfl is scratch for the
// 2*seg_num warp totals (needs 2*seg_num + 1 entries); seg_num is the number
// of warps per half (callers pass blockDim.x / WARPSIZE_NV).
// Must be executed by all threads of the block (contains __syncthreads()).
// Uses *_sync shuffles (CUDA 9+); the legacy mask-less __shfl_up is removed
// on Volta+ targets.
template<typename sT, typename T>
__inline__ __device__
void scan_double_width_plus1_shfl(volatile sT *s_scan,
volatile T *s_scan_shfl,
const int local_id,
T r_in,
T r_in_halfwidth,
const int seg_num)
{
// 3-stage method: per-warp scan, scan of warp totals, propagate.
const int lane_id = local_id % WARPSIZE_NV;
const int seg_id = local_id / WARPSIZE_NV;
// stage 1. each warp scans both of its element sets
T r_scan = scan_32_shfl<T>(r_in, lane_id);
T r_scan_halfwidth = scan_32_shfl<T>(r_in_halfwidth, lane_id);
if (lane_id == WARPSIZE_NV - 1)
{
// warp totals: lower half at [0, seg_num), upper half at [seg_num, 2*seg_num)
s_scan_shfl[seg_id] = r_scan;
s_scan_shfl[seg_id + seg_num] = r_scan_halfwidth;
}
// inclusive to exclusive within the warp (full-warp mask; all lanes active)
r_scan = __shfl_up_sync(0xffffffff, r_scan, 1);
r_scan_halfwidth = __shfl_up_sync(0xffffffff, r_scan_halfwidth, 1);
r_scan = lane_id ? r_scan : 0;
r_scan_halfwidth = lane_id ? r_scan_halfwidth : 0;
__syncthreads();
// stage 2. the first warp scans the 2*seg_num warp totals
r_in = (local_id < 2 * seg_num) ? s_scan_shfl[local_id] : 0;
if (!seg_id)
r_in = scan_32_shfl<T>(r_in, lane_id);
if (local_id < 2 * seg_num)
s_scan_shfl[local_id + 1] = r_in; // shifted: s_scan_shfl[2*seg_num] = grand total
__syncthreads();
// stage 3. propagate (element-wise add) each segment's base offset
if (seg_id)
{
r_scan += s_scan_shfl[seg_id];
}
// the halfwidth segment always has a predecessor (its index >= seg_num > 0),
// so this add is unconditional
r_scan_halfwidth += s_scan_shfl[seg_id + seg_num];
s_scan[local_id] = r_scan;
s_scan[local_id + blockDim.x] = r_scan_halfwidth;
if (!local_id)
s_scan[2 * blockDim.x] = s_scan_shfl[2 * seg_num];
return;
}
// Exclusive prefix sum (Blelloch up-sweep/down-sweep) of 32 shorts in shared
// memory; the total is stored to s_scan[32], so the buffer needs 33 entries.
// Executed by the first 16 threads; no __syncthreads() — relies on implicit
// warp-synchronous execution. NOTE(review): that assumption does not hold
// under Volta+ independent thread scheduling — confirm target architectures.
__inline__ __device__
void scan_32(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 16) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root step: publish the inclusive total to s_scan[32], seed the down-sweep
if (threadIdx.x == 0) { s_scan[31] += s_scan[15]; s_scan[32] = s_scan[31]; s_scan[31] = 0; temp = s_scan[15]; s_scan[15] = 0; s_scan[31] += temp; }
// down-sweep phase: turn partial sums into exclusive prefixes
if (threadIdx.x < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
}
// Exclusive prefix sum (Blelloch) of 64 shorts in shared memory; the total is
// stored to s_scan[64] (buffer needs 65 entries). Executed by the first 32
// threads; __syncthreads() guards only the steps where more than one warp is
// live — the inner steps rely on implicit warp-synchronous execution
// (NOTE(review): pre-Volta assumption, confirm target architectures).
// Must be reached by all threads of the block (contains __syncthreads()).
__inline__ __device__
void scan_64(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 32) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 16) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root step: publish the inclusive total to s_scan[64], seed the down-sweep
if (threadIdx.x == 0) { s_scan[63] += s_scan[31]; s_scan[64] = s_scan[63]; s_scan[63] = 0; temp = s_scan[31]; s_scan[31] = 0; s_scan[63] += temp; }
// down-sweep phase: turn partial sums into exclusive prefixes
if (threadIdx.x < 2) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 32) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
}
// Exclusive prefix sum (Blelloch) of 128 shorts in shared memory; the total is
// stored to s_scan[128] (buffer needs 129 entries). Executed by the first 64
// threads; __syncthreads() guards only the multi-warp steps — inner steps rely
// on implicit warp-synchronous execution (NOTE(review): pre-Volta assumption).
// Must be reached by all threads of the block (contains __syncthreads()).
__inline__ __device__
void scan_128(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 64) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 32) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 16) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root step: publish the inclusive total to s_scan[128], seed the down-sweep
if (threadIdx.x == 0) { s_scan[127] += s_scan[63]; s_scan[128] = s_scan[127]; s_scan[127] = 0; temp = s_scan[63]; s_scan[63] = 0; s_scan[127] += temp; }
// down-sweep phase: turn partial sums into exclusive prefixes
if (threadIdx.x < 2) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 32) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 64) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
}
// Exclusive prefix sum (Blelloch) of 256 shorts in shared memory; the total is
// stored to s_scan[256] (buffer needs 257 entries). Executed by the first 128
// threads; __syncthreads() guards only the multi-warp steps — inner steps rely
// on implicit warp-synchronous execution (NOTE(review): pre-Volta assumption).
// Must be reached by all threads of the block (contains __syncthreads()).
__inline__ __device__
void scan_256(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 128) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 64) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 32) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 16) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 64 * baseai - 1; bi = 64 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root step: publish the inclusive total to s_scan[256], seed the down-sweep
if (threadIdx.x == 0) { s_scan[255] += s_scan[127]; s_scan[256] = s_scan[255]; s_scan[255] = 0; temp = s_scan[127]; s_scan[127] = 0; s_scan[255] += temp; }
// down-sweep phase: turn partial sums into exclusive prefixes
if (threadIdx.x < 2) { ai = 64 * baseai - 1; bi = 64 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 32) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 64) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 128) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
}
// Exclusive prefix sum (Blelloch) of 512 shorts in shared memory; the total is
// stored to s_scan[512] (buffer needs 513 entries). Executed by the first 256
// threads; __syncthreads() guards only the multi-warp steps — inner steps rely
// on implicit warp-synchronous execution (NOTE(review): pre-Volta assumption).
// Must be reached by all threads of the block (contains __syncthreads()).
__inline__ __device__
void scan_512(volatile short *s_scan)
{
int ai, bi;
int baseai = 1 + 2 * threadIdx.x;
int basebi = baseai + 1;
short temp;
// up-sweep (reduce) phase
if (threadIdx.x < 256) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 128) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 64) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 32) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; }
__syncthreads();
if (threadIdx.x < 16) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 8) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 4) { ai = 64 * baseai - 1; bi = 64 * basebi - 1; s_scan[bi] += s_scan[ai]; }
if (threadIdx.x < 2) { ai = 128 * baseai - 1; bi = 128 * basebi - 1; s_scan[bi] += s_scan[ai]; }
// root step: publish the inclusive total to s_scan[512], seed the down-sweep
if (threadIdx.x == 0) { s_scan[511] += s_scan[255]; s_scan[512] = s_scan[511]; s_scan[511] = 0; temp = s_scan[255]; s_scan[255] = 0; s_scan[511] += temp; }
// down-sweep phase: turn partial sums into exclusive prefixes
if (threadIdx.x < 2) { ai = 128 * baseai - 1; bi = 128 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 4) { ai = 64 * baseai - 1; bi = 64 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 8) { ai = 32 * baseai - 1; bi = 32 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 16) { ai = 16 * baseai - 1; bi = 16 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
if (threadIdx.x < 32) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 64) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 128) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp;}
__syncthreads();
if (threadIdx.x < 256) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; }
}
// Compacts the sorted (key, value) list in shared memory: duplicate keys are
// summed into a single entry and all entries are moved to their final compact
// positions. Each thread covers two elements ('local_id' and
// 'local_id_halfwidth' = local_id + local_size); s_scan must hold
// 2*local_size + 1 shorts and receives the exclusive scan of the
// "first occurrence" flags, with the compacted length at s_scan[2*local_size].
// Must be executed by all threads of the block (contains __syncthreads()).
//
// FIX: both duplicate-accumulation loops now test the bound BEFORE reading
// s_scan[move_pointer + 1]. The first loop previously had no bound at all and
// the second tested it after the read, so when the trailing elements were all
// duplicates the code read s_scan[2*local_size + 1], one past the array.
__inline__ __device__
void compression_scan(volatile short *s_scan,
volatile int *s_scan_shfl,
int *s_key,
value_type *s_val,
const int local_counter,
const int local_size,
const int local_id,
const int local_id_halfwidth)
{
// compression - prefix sum
bool duplicate = 1;
bool duplicate_halfwidth = 1;
// flag the first occurrence of every distinct key (1 = new key)
if (local_id < local_counter && local_id > 0)
duplicate = (s_key[local_id] != s_key[local_id - 1]);
if (local_id_halfwidth < local_counter)
duplicate_halfwidth = (s_key[local_id_halfwidth] != s_key[local_id_halfwidth - 1]);
#if __CUDA_ARCH__ >= 300
scan_double_width_plus1_shfl<short, int>(s_scan, s_scan_shfl, local_id,
duplicate, duplicate_halfwidth, local_size/WARPSIZE_NV);
#else
// copy bool values from registers to shared memory (s_scan)
s_scan[local_id] = duplicate;
s_scan[local_id_halfwidth] = duplicate_halfwidth;
__syncthreads();
// in-place exclusive prefix-sum scan on s_scan (width = 2 * local_size)
switch (local_size)
{
case 16:
scan_32(s_scan);
break;
case 32:
scan_64(s_scan);
break;
case 64:
scan_128(s_scan);
break;
case 128:
scan_256(s_scan);
break;
case 256:
scan_512(s_scan);
break;
}
#endif
__syncthreads();
// compute final position and final value in registers
int move_pointer;
short final_position, final_position_halfwidth;
int final_key, final_key_halfwidth;
value_type final_value, final_value_halfwidth;
if (local_id < local_counter && duplicate == 1)
{
final_position = s_scan[local_id];
final_key = s_key[local_id];
final_value = s_val[local_id];
move_pointer = local_id + 1;
// accumulate values of the following duplicates of this key: equal
// adjacent scan entries mark duplicates; bound check first so the
// short-circuit keeps s_scan[move_pointer + 1] in range
while (move_pointer < 2 * local_size && s_scan[move_pointer] == s_scan[move_pointer + 1])
{
final_value += s_val[move_pointer];
move_pointer++;
}
}
if (local_id_halfwidth < local_counter && duplicate_halfwidth == 1)
{
final_position_halfwidth = s_scan[local_id_halfwidth];
final_key_halfwidth = s_key[local_id_halfwidth];
final_value_halfwidth = s_val[local_id_halfwidth];
move_pointer = local_id_halfwidth + 1;
// same accumulation; bound check moved before the shared-memory read
while (move_pointer < 2 * local_size && s_scan[move_pointer] == s_scan[move_pointer + 1])
{
final_value_halfwidth += s_val[move_pointer];
move_pointer++;
}
}
__syncthreads();
// scatter the surviving (key, value) pairs to their compacted positions
if (local_id < local_counter && duplicate == 1)
{
s_key[final_position] = final_key;
s_val[final_position] = final_value;
}
if (local_id_halfwidth < local_counter && duplicate_halfwidth == 1)
{
s_key[final_position_halfwidth] = final_key_halfwidth;
s_val[final_position_halfwidth] = final_value_halfwidth;
}
}
// One thread block computes one row of C = A*B with the ESC method:
//   1) expansion:   gather all intermediate products of the row into shared memory,
//   2) sorting:     odd-even sort by column index,
//   3) compression: sum duplicate columns and write the compacted row to Ct.
// The row index comes from d_queue at (queue_offset + blockIdx.x); each thread
// covers two of the 2*blockDim.x shared slots, so the row's upper-bound nnz
// must fit in 2 * c_scansize (c_scansize is assumed to equal blockDim.x —
// TODO confirm against the launch sites). 'n' is used as a sentinel key for
// padding, presumably the number of columns of C — verify against caller.
template<typename vT, int c_scansize>
__global__
void ESC_bitonic_scan(const int* d_queue,
const int* d_csrRowPtrA,
const int* __restrict__ d_csrColIndA,
const vT* __restrict__ d_csrValA,
const int* d_csrRowPtrB,
const int* d_csrColIndB,
const vT* d_csrValB,
int* d_csrRowPtrC,
int* d_csrColIndCt,
vT* d_csrValCt,
const int queue_offset,
const int n)
{
__shared__ int s_key[2 * c_scansize];
__shared__ vT s_val[2 * c_scansize];
__shared__ short s_scan[2 * c_scansize + 1];
#if __CUDA_ARCH__ >= 300
volatile __shared__ int s_scan_shfl[2 * c_scansize / WARPSIZE_NV + 1];
#else
volatile __shared__ int *s_scan_shfl;
#endif
int local_id = threadIdx.x;
int group_id = blockIdx.x;
int local_size = blockDim.x;
int width = local_size * 2;
int i, local_counter = 0;
int strideB, local_offset, global_offset;
int invalid_width;
int local_id_halfwidth = local_id + local_size;
int row_id_B; // index_type
int row_id;// index_type
row_id = d_queue[TUPLE_QUEUE * (queue_offset + group_id)];
int start_col_index_A, stop_col_index_A; // index_type
int start_col_index_B, stop_col_index_B; // index_type
vT value_A; // value_type
start_col_index_A = d_csrRowPtrA[row_id];
stop_col_index_A = d_csrRowPtrA[row_id + 1];
// i is both col index of A and row index of B
// expansion: append each B row (scaled by the matching A value) to s_key/s_val
for (i = start_col_index_A; i < stop_col_index_A; i++)
{
row_id_B = d_csrColIndA[i];
value_A = d_csrValA[i];
start_col_index_B = d_csrRowPtrB[row_id_B];
stop_col_index_B = d_csrRowPtrB[row_id_B + 1];
strideB = stop_col_index_B - start_col_index_B;
if (local_id < strideB)
{
local_offset = local_counter + local_id;
global_offset = start_col_index_B + local_id;
s_key[local_offset] = d_csrColIndB[global_offset];
s_val[local_offset] = d_csrValB[global_offset] * value_A;
}
if (local_id_halfwidth < strideB)
{
local_offset = local_counter + local_id_halfwidth;
global_offset = start_col_index_B + local_id_halfwidth;
s_key[local_offset] = d_csrColIndB[global_offset];
s_val[local_offset] = d_csrValB[global_offset] * value_A;
}
local_counter += strideB;
}
__syncthreads();
invalid_width = width - local_counter;
// to meet 2^N, set the rest elements to n (number of columns of C)
if (local_id < invalid_width)
s_key[local_counter + local_id] = n;
//if (local_id_halfwidth < invalid_width)
// s_key[local_counter + local_id_halfwidth] = n;
__syncthreads();
// bitonic sort
oddeven(s_key, s_val, width);
__syncthreads();
// compression - scan
compression_scan(s_scan, s_scan_shfl, s_key, s_val, local_counter,
local_size, local_id, local_id_halfwidth);
__syncthreads();
// s_scan[width] holds the compacted count incl. the padding run; subtract it
local_counter = s_scan[width] - invalid_width;
if (local_id == 0)
d_csrRowPtrC[row_id] = local_counter;
// write compressed lists to global mem
int row_offset = d_queue[TUPLE_QUEUE * (queue_offset + group_id) + 1]; //d_csrRowPtrCt[row_id];
if (local_id < local_counter)
{
global_offset = row_offset + local_id;
d_csrColIndCt[global_offset] = s_key[local_id];
d_csrValCt[global_offset] = s_val[local_id];
}
if (local_id_halfwidth < local_counter)
{
global_offset = row_offset + local_id_halfwidth;
d_csrColIndCt[global_offset] = s_key[local_id_halfwidth];
d_csrValCt[global_offset] = s_val[local_id_halfwidth];
}
}
// Launches ESC_bitonic_scan for the queue segment starting at 'position',
// dispatching on num_threads because the scan width is a compile-time template
// parameter. Returns BHSPARSE_SUCCESS, or -1 on a CUDA error. 'j' is the bin
// id, used only in the profiling message.
int bhsparse_cuda::compute_nnzC_Ct_bitonic(int num_threads, int num_blocks, int j, int position)
{
    cudaError_t err = cudaSuccess;
    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }
// One launch per supported block size; SIZE must match num_threads.
#define BHSPARSE_LAUNCH_ESC_BITONIC(SIZE)                                        \
    ESC_bitonic_scan<value_type, SIZE><<< num_blocks, num_threads >>>(           \
            _d_queue_one, _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,              \
            _d_csrRowPtrB, _d_csrColIndB, _d_csrValB,                            \
            _d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt,                          \
            position, _n)
    switch (num_threads)
    {
    case 16:
        BHSPARSE_LAUNCH_ESC_BITONIC(16);
        break;
    case 32:
        BHSPARSE_LAUNCH_ESC_BITONIC(32);
        break;
    case 64:
        BHSPARSE_LAUNCH_ESC_BITONIC(64);
        break;
    case 128:
        BHSPARSE_LAUNCH_ESC_BITONIC(128);
        break;
    case 256:
        BHSPARSE_LAUNCH_ESC_BITONIC(256);
        break;
    }
#undef BHSPARSE_LAUNCH_ESC_BITONIC
    err = cudaGetLastError();
    if (err != cudaSuccess)
    { cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
    if (_profiling)
    {
        err = cudaDeviceSynchronize();
        if (err != cudaSuccess)
        { cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
        sdkStopTimer(&timer);
        cout << "[ " << j << " ] ESC_bitonic time: " << sdkGetTimerValue(&timer) << " ms" << endl;
    }
    return BHSPARSE_SUCCESS;
}
// Handles queued rows whose result row in C is empty: each thread takes one
// queue entry and writes 0 into the corresponding slot of d_csrRowPtrC.
__global__
void ESC_0_cudakernel(const int* d_queue,
int* d_csrRowPtrC,
const int queue_size,
const int queue_offset)
{
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= queue_size)
return;
// The queue stores TUPLE_QUEUE ints per entry; slot 0 is the row index.
const int row = d_queue[TUPLE_QUEUE * (queue_offset + gid)];
d_csrRowPtrC[row] = 0;
}
// Handles queued rows whose result row in C contains exactly one nonzero:
// each thread finds the first column of its A row whose matching B row is
// non-empty and emits that single product into Ct.
__global__
void ESC_1_cudakernel(const int* d_queue,
const int* d_csrRowPtrA,
const int* __restrict__ d_csrColIndA,
const value_type* __restrict__ d_csrValA,
const int* d_csrRowPtrB,
const int* d_csrColIndB,
const value_type* d_csrValB,
int* d_csrRowPtrC,
const int* d_csrRowPtrCt,
int* d_csrColIndCt,
value_type* d_csrValCt,
const int queue_size,
const int queue_offset)
{
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= queue_size)
return;
const int queue_base = TUPLE_QUEUE * (queue_offset + gid);
const int row = d_queue[queue_base];
d_csrRowPtrC[row] = 1;
const int out_index = d_queue[queue_base + 1]; // start of this row in Ct
const int a_start = d_csrRowPtrA[row];
const int a_stop = d_csrRowPtrA[row + 1];
for (int a_pos = a_start; a_pos < a_stop; a_pos++)
{
const int b_row = d_csrColIndA[a_pos];
const int b_pos = d_csrRowPtrB[b_row];
if (b_pos == d_csrRowPtrB[b_row + 1])
continue; // this row of B is empty
d_csrColIndCt[out_index] = d_csrColIndB[b_pos];
d_csrValCt[out_index] = d_csrValB[b_pos] * d_csrValA[a_pos];
break;
}
}
// Launches ESC_0_cudakernel over the queue segment [position, position+counter)
// of rows with empty C rows. Returns BHSPARSE_SUCCESS, or -1 on a CUDA error.
// 'j' is the bin id, used only in the profiling message.
int bhsparse_cuda::compute_nnzC_Ct_0(int num_threads, int num_blocks, int j, int counter, int position)
{
    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }

    ESC_0_cudakernel<<< num_blocks, num_threads >>>(_d_queue_one, _d_csrRowPtrC, counter, position);

    // Launch-configuration errors surface here; execution errors at the sync.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    { cout << "err = " << cudaGetErrorString(err) << endl; return -1; }

    if (_profiling)
    {
        err = cudaDeviceSynchronize();
        if (err != cudaSuccess)
        { cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
        sdkStopTimer(&timer);
        cout << "[ " << j << " ] ESC_0 time: " << sdkGetTimerValue(&timer) << " ms" << endl;
    }
    return BHSPARSE_SUCCESS;
}
// Launches ESC_1_cudakernel over the queue segment [position, position+counter)
// of rows whose C row holds exactly one nonzero. Returns BHSPARSE_SUCCESS, or
// -1 on a CUDA error. 'j' is the bin id, used only in the profiling message.
int bhsparse_cuda::compute_nnzC_Ct_1(int num_threads, int num_blocks, int j, int counter, int position)
{
    cudaError_t err = cudaSuccess;
    StopWatchInterface *timer = NULL;
    if (_profiling)
    {
        sdkCreateTimer(&timer);
        sdkStartTimer(&timer);
    }
    ESC_1_cudakernel<<< num_blocks, num_threads >>>(_d_queue_one,
                                          _d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
                                          _d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
                                          _d_csrRowPtrC, _d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt,
                                          counter, position);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    { cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
    if (_profiling)
    {
        err = cudaDeviceSynchronize();
        if (err != cudaSuccess)
        { cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
        sdkStopTimer(&timer);
        // Fixed label: this method times ESC_1 (it was mislabeled "ESC_0").
        cout << "[ " << j << " ] ESC_1 time: " << sdkGetTimerValue(&timer) << " ms" << endl;
    }
    return BHSPARSE_SUCCESS;
}
// Binary search for key_input in the sorted s_key[0..merged_size-1]; when
// found, SUBTRACTS val_input from the matching value (used to roll back a
// previously applied binarysearch add). A missing key is silently ignored.
__inline__ __device__
void binarysearch_sub(int *s_key,
value_type *s_val,
int key_input,
value_type val_input,
int merged_size)
{
int lo = 0;
int hi = merged_size - 1;
while (lo <= hi)
{
const int mid = lo + (hi - lo) / 2;
const int probe = s_key[mid];
if (probe < key_input)
lo = mid + 1;
else if (probe > key_input)
hi = mid - 1;
else
{
// keys are unique within each input row, so no atomic is needed
s_val[mid] -= val_input;
break;
}
}
}
// Binary search for key_input in the sorted s_key[0..merged_size-1]; when
// found, adds val_input to the matching value and clears *is_new_col so the
// caller knows the column already exists. A missing key leaves *is_new_col
// untouched.
__inline__ __device__
void binarysearch(int *s_key,
value_type *s_val,
int key_input,
value_type val_input,
int merged_size,
bool *is_new_col)
{
int lo = 0;
int hi = merged_size - 1;
while (lo <= hi)
{
const int mid = lo + (hi - lo) / 2;
const int probe = s_key[mid];
if (probe < key_input)
lo = mid + 1;
else if (probe > key_input)
hi = mid - 1;
else
{
// keys are unique within each input row, so no atomic is needed
s_val[mid] += val_input;
*is_new_col = 0;
break;
}
}
}
// Dispatches to the in-place exclusive scan matching the current block size
// (the scanned width is twice blockDim.x). Unsupported sizes are a no-op.
__inline__ __device__
void scan(volatile short *s_scan)
{
const int threads = blockDim.x;
if (threads == 32)
scan_32(s_scan);
else if (threads == 64)
scan_64(s_scan);
else if (threads == 128)
scan_128(s_scan);
else if (threads == 256)
scan_256(s_scan);
else if (threads == 512)
scan_512(s_scan);
}
// Strict less-than comparator used by the merge-path routines.
__inline__ __device__
bool comp(int a, int b)
{
return a < b;
}
// Merge-path partition: given two sorted arrays a[0..aCount) and b[0..bCount)
// and a diagonal index 'diag', returns how many elements of 'a' belong to the
// first 'diag' elements of the merged output (binary search on the diagonal).
__inline__ __device__
int mergepath_partition(int *a,
const int aCount,
int *b,
const int bCount,
const int diag)
{
int lo = max(0, diag - bCount);
int hi = min(diag, aCount);
// invariant: the split point lies in [lo, hi]
while (lo < hi)
{
const int mid = (lo + hi) >> 1;
if (comp(a[mid], b[diag - 1 - mid]))
lo = mid + 1;
else
hi = mid;
}
return lo;
}
// Sequentially merges VT elements from the two sorted sub-ranges
// s_key/s_val[aBegin, aEnd) and [bBegin, bEnd) into the per-thread registers
// reg_key/reg_val. Ties prefer the A side (stable with respect to A).
// NOTE(review): s_key[aBegin] and s_key[bBegin] are read speculatively before
// any bounds test, and the winning cursor is re-read after its last needed
// element — callers appear to rely on the shared buffers having readable
// slack past each range end (they are declared with +1 padding); confirm the
// sizing at the call sites.
__inline__ __device__
void mergepath_serialmerge(int *s_key,
value_type *s_val,
int aBegin,
const int aEnd,
int bBegin,
const int bEnd,
int *reg_key,
value_type *reg_val,
const int VT)
{
int key_a = s_key[aBegin];
int key_b = s_key[bBegin];
bool p;
for(int i = 0; i < VT; ++i)
{
// take from A when B is exhausted, or A is non-empty and key_a <= key_b
p = (bBegin >= bEnd) || ((aBegin < aEnd) && !comp(key_b, key_a));
reg_key[i] = p ? key_a : key_b;
reg_val[i] = p ? s_val[aBegin] : s_val[bBegin];
if(p)
key_a = s_key[++aBegin];
else
key_b = s_key[++bBegin];
}
}
// Copies a merged (key, value) list of length 'merged_size' between shared
// memory and the global Ct arrays at 'row_offset'.
// is_write == true: shared -> global; is_write == false: global -> shared.
// Must be called by all threads of the block (cooperative block-stride copy).
__inline__ __device__
void readwrite_mergedlist(int *d_csrColIndCt,
value_type *d_csrValCt,
int *s_key_merged,
value_type *s_val_merged,
const int merged_size,
const int row_offset,
const bool is_write)
{
// block-stride loop: thread t handles elements t, t+blockDim.x, ...
for (int local = threadIdx.x; local < merged_size; local += blockDim.x)
{
const int global = row_offset + local;
if (is_write)
{
d_csrColIndCt[global] = s_key_merged[local];
d_csrValCt[global] = s_val_merged[local];
}
else
{
s_key_merged[local] = d_csrColIndCt[global];
s_val_merged[local] = d_csrValCt[global];
}
}
}
// One thread block incrementally builds one (long) row of C = A*B with the
// merge-based EM method. The row's merged (column, value) list is kept sorted
// in shared memory (capacity c_buffsize). Each batch of candidates from B is
// first binary-searched into the existing list (duplicates accumulate in
// place); surviving new columns are compacted via a block scan and merged in
// with a merge-path step. If the list would overflow, progress (merged size,
// A cursor, B cursor) is saved into d_queue and the partial list is dumped to
// global memory so a later launch can resume; d_queue[...+2] == -1 marks the
// row as finished. Assumes c_scansize == blockDim.x — TODO confirm at the
// launch sites.
template<typename vT, int c_buffsize, int c_scansize>
__global__
void EM_mergepath(int * d_queue,
const int * d_csrRowPtrA,
const int * __restrict__ d_csrColIndA,
const vT * __restrict__ d_csrValA,
const int * d_csrRowPtrB,
const int * d_csrColIndB,
const vT * d_csrValB,
int *d_csrRowPtrC,
int *d_csrColIndCt,
vT *d_csrValCt,
const int queue_offset)
{
__shared__ int s_key_merged[c_buffsize+1];
__shared__ vT s_val_merged[c_buffsize+1];
#if __CUDA_ARCH__ >= 300
int seg_num = c_scansize / WARPSIZE_NV;
volatile __shared__ int s_scan[c_scansize / WARPSIZE_NV + 1];
#else
volatile __shared__ short s_scan[c_scansize+1];
#endif
const int queue_id = TUPLE_QUEUE * (queue_offset + blockIdx.x);
// if merged size equals -1, kernel return since this row is done
int merged_size = d_queue[queue_id + 2];
const int local_id = threadIdx.x; //threadIdx.x;
const int row_id = d_queue[queue_id];
const int local_size = blockDim.x;
const float local_size_value_type = local_size;
int reg_reuse1;
int col_Ct; // index_type
vT val_Ct; // value_type
vT val_A; // value_type
int start_col_index_A, stop_col_index_A; // index_type
int start_col_index_B, stop_col_index_B; // index_type
bool is_new_col;
bool is_last;
int VT, diag, mp;
int reg_key[16];
vT reg_val[16];
start_col_index_A = d_csrRowPtrA[row_id];
stop_col_index_A = d_csrRowPtrA[row_id + 1];
if (merged_size == 0)
{
// fresh row: seed the merged list with the first B row of this A row
is_last = false;
// read the first set of current nnzCt row to merged list
reg_reuse1 = d_csrColIndA[start_col_index_A]; // reg_reuse1 = row_id_B
val_A = d_csrValA[start_col_index_A];
start_col_index_B = d_csrRowPtrB[reg_reuse1]; // reg_reuse1 = row_id_B
stop_col_index_B = d_csrRowPtrB[reg_reuse1 + 1]; // reg_reuse1 = row_id_B
const int stride = stop_col_index_B - start_col_index_B;
const int loop = ceil((float)stride / local_size_value_type); //ceil((value_type)stride / (value_type)local_size);
start_col_index_B += local_id;
for (int k = 0; k < loop; k++)
{
reg_reuse1 = k != loop - 1 ? local_size : stride - k * local_size; // reg_reuse1 = input_size
// if merged_size + reg_reuse1 > c_buffsize, write it to global mem and return
if (merged_size + reg_reuse1 > c_buffsize)
{
// write a signal to some place, not equals -1 means next round is needed
if (local_id == 0)
{
d_queue[queue_id + 2] = merged_size;
d_queue[queue_id + 3] = start_col_index_A;
d_queue[queue_id + 4] = start_col_index_B;
}
// dump current data to global mem
reg_reuse1 = d_queue[queue_id + 1];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, s_key_merged, s_val_merged, merged_size, reg_reuse1, 1);
return;
}
if (start_col_index_B < stop_col_index_B)
{
// the first B row is already sorted, so it is copied in directly
col_Ct = d_csrColIndB[start_col_index_B];
val_Ct = d_csrValB[start_col_index_B] * val_A;
s_key_merged[merged_size + local_id] = col_Ct;
s_val_merged[merged_size + local_id] = val_Ct;
}
merged_size += reg_reuse1; // reg_reuse1 = input_size
start_col_index_B += local_size;
}
start_col_index_A++;
}
else
{
// resumed row: restore the saved A cursor and reload the partial list
is_last = true;
start_col_index_A = d_queue[queue_id + 3];
// load existing merged list
reg_reuse1 = d_queue[queue_id + 5];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, s_key_merged, s_val_merged, merged_size, reg_reuse1, 0);
}
__syncthreads();
// merge the rest of sets of current nnzCt row to the merged list
while (start_col_index_A < stop_col_index_A)
{
reg_reuse1 = d_csrColIndA[start_col_index_A]; // reg_reuse1 = row_id_B
val_A = d_csrValA[start_col_index_A];
// on the first iteration after a resume, continue from the saved B cursor
start_col_index_B = is_last ? d_queue[queue_id + 4] : d_csrRowPtrB[reg_reuse1]; // reg_reuse1 = row_id_B
is_last = false;
stop_col_index_B = d_csrRowPtrB[reg_reuse1 + 1]; // reg_reuse1 = row_id_B
const int stride = stop_col_index_B - start_col_index_B;
const int loop = ceil((float)stride / local_size_value_type); //ceil((value_type)stride / (value_type)local_size);
//int start_col_index_B_zeropoint = start_col_index_B;
start_col_index_B += local_id;
for (int k = 0; k < loop; k++)
{
__syncthreads();
is_new_col = 0;
if (start_col_index_B < stop_col_index_B)
{
col_Ct = d_csrColIndB[start_col_index_B];
val_Ct = d_csrValB[start_col_index_B] * val_A;
// binary search on existing sorted list
// if the column is existed, add the value to the position
// else, set scan value to 1, and wait for scan
is_new_col = 1;
binarysearch(s_key_merged, s_val_merged, col_Ct, val_Ct, merged_size, &is_new_col);
}
#if __CUDA_ARCH__ >= 300
//const int seg_num = (k == loop - 1) ?
// ceil((float)(stop_col_index_B - start_col_index_B_zeropoint) / (float)WARPSIZE_NV) :
// local_size / WARPSIZE_NV;
//if (!local_id)
// printf("blockIdx = %d, seg_num = %d\n", blockIdx.x, seg_num);
int r_scan = scan_plus1_shfl<int>(s_scan, local_id, is_new_col, seg_num);
const int s_scan_sum = s_scan[seg_num];
#else
s_scan[local_id] = is_new_col;
__syncthreads();
// scan with half-local_size work-items
// s_scan[local_size] is the size of input non-duplicate array
scan(s_scan);
__syncthreads();
const int s_scan_sum = s_scan[local_size];
#endif
// if all elements are absorbed into merged list,
// the following work in this inner-loop is not needed any more
if (s_scan_sum == 0)
{
start_col_index_B += local_size;
//start_col_index_B_zeropoint += local_size;
continue;
}
// check if the total size is larger than the capicity of merged list
if (merged_size + s_scan_sum > c_buffsize)
{
// roll back 'binary serach plus' in this round
if (start_col_index_B < stop_col_index_B)
{
binarysearch_sub(s_key_merged, s_val_merged, col_Ct, val_Ct, merged_size);
}
__syncthreads();
// write a signal to some place, not equals -1 means next round is needed
if (local_id == 0)
{
d_queue[queue_id + 2] = merged_size;
d_queue[queue_id + 3] = start_col_index_A;
d_queue[queue_id + 4] = start_col_index_B;
}
// dump current data to global mem
reg_reuse1 = d_queue[queue_id + 1]; //d_csrRowPtrCt[row_id];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, s_key_merged, s_val_merged, merged_size, reg_reuse1, 1);
return;
}
// write compact input to free place in merged list
if(is_new_col)
{
#if __CUDA_ARCH__ >= 300
reg_reuse1 = merged_size + r_scan;
#else
reg_reuse1 = merged_size + s_scan[local_id];
#endif
s_key_merged[reg_reuse1] = col_Ct;
s_val_merged[reg_reuse1] = val_Ct;
}
__syncthreads();
// merge path partition
// each thread serially merges VT elements of [0, merged_size) and the
// s_scan_sum new entries staged at [merged_size, merged_size + s_scan_sum)
VT = ceil((float)(merged_size + s_scan_sum) / local_size_value_type);
diag = VT * local_id;
mp = mergepath_partition(s_key_merged, merged_size, &s_key_merged[merged_size], s_scan_sum, diag);
mergepath_serialmerge(s_key_merged, s_val_merged,
mp, merged_size, merged_size + diag - mp, merged_size + s_scan_sum,
reg_key, reg_val, VT);
__syncthreads();
// scatter the register-merged run back to shared memory
for (int is = 0; is < VT; is++)
{
s_key_merged[diag + is] = reg_key[is];
s_val_merged[diag + is] = reg_val[is];
}
__syncthreads();
merged_size += s_scan_sum;
start_col_index_B += local_size;
//start_col_index_B_zeropoint += local_size;
}
start_col_index_A++;
}
__syncthreads();
// row complete: record its nnz and mark the queue entry as done (-1)
if (local_id == 0)
{
d_csrRowPtrC[row_id] = merged_size;
d_queue[queue_id + 2] = -1;
}
// write merged list to global mem
reg_reuse1 = d_queue[queue_id + 1]; //d_csrRowPtrCt[row_id];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, s_key_merged, s_val_merged, merged_size, reg_reuse1, 1);
}
// Second-level (global-memory) merge-path ESC kernel: resumes rows whose
// merged list overflowed the shared-memory buffer of EM_mergepath. Entries
// already merged in earlier rounds ("l2") live in global memory at this
// row's Ct base address; newly generated entries are staged in a
// shared-memory level-1 buffer ("l1") and merged into l2 either when the
// row completes or when l1 overflows again.
// Queue tuple layout (TUPLE_QUEUE ints per queued row, as read/written
// below): [0]=row id, [1]=current Ct base address, [2]=merged size so far
// (-1 once the row is finished), [3]=resume position in A's column list,
// [4]=resume position in B's column list, [5]=previous Ct base address.
// Launch: one thread block per queued row, blockDim.x == c_scansize
// presumably — TODO confirm against the host-side launch.
template<typename vT, int c_buffsize, int c_scansize>
__global__
void EM_mergepath_global(int * d_queue,
const int * d_csrRowPtrA,
const int * __restrict__ d_csrColIndA,
const vT * __restrict__ d_csrValA,
const int * d_csrRowPtrB,
const int * d_csrColIndB,
const vT * d_csrValB,
int *d_csrRowPtrC,
int *d_csrColIndCt,
vT *d_csrValCt,
const int queue_offset)
{
// l1 staging buffer for keys/values produced in this round.
__shared__ int s_key_merged_l1[c_buffsize+1];
__shared__ vT s_val_merged_l1[c_buffsize+1];
#if __CUDA_ARCH__ >= 300
// shuffle-based scan only needs one slot per warp segment
const int seg_num = c_scansize / WARPSIZE_NV;
volatile __shared__ int s_scan[c_scansize / WARPSIZE_NV + 1];
#else
volatile __shared__ short s_scan[c_scansize+1];
#endif
int queue_id = TUPLE_QUEUE * (queue_offset + blockIdx.x);
// if merged size equals -1, kernel return since this row is done
int merged_size_l2 = d_queue[queue_id + 2];
int merged_size_l1 = 0;
int local_id = threadIdx.x; //threadIdx.x;
int row_id = d_queue[queue_id];
int local_size = blockDim.x;
float local_size_value_type = local_size;
int stride, loop;
int reg_reuse1;
int col_Ct; // index_type
vT val_Ct; // vT
vT val_A; // vT
int start_col_index_A, stop_col_index_A; // index_type
int start_col_index_B, stop_col_index_B; // index_type
int k, is;
bool is_new_col;
bool is_last;
int VT, diag, mp;
// per-thread registers for the merge-path serial merge output
int reg_key[80];
vT reg_val[80];
start_col_index_A = d_csrRowPtrA[row_id];
stop_col_index_A = d_csrRowPtrA[row_id + 1];
is_last = true;
// resume the A position recorded by the previous (overflowed) round
start_col_index_A = d_queue[queue_id + 3];
// load existing merged list
reg_reuse1 = d_queue[queue_id + 1];
int *d_key_merged = &d_csrColIndCt[reg_reuse1];
vT *d_val_merged = &d_csrValCt[reg_reuse1];
// copy l2 from its previous Ct location to the new (larger) one
reg_reuse1 = d_queue[queue_id + 5];
readwrite_mergedlist(d_csrColIndCt, d_csrValCt, d_key_merged, d_val_merged, merged_size_l2, reg_reuse1, 0);
__syncthreads();
// merge the rest of sets of current nnzCt row to the merged list
while (start_col_index_A < stop_col_index_A)
{
reg_reuse1 = d_csrColIndA[start_col_index_A]; // reg_reuse1 = row_id_B
val_A = d_csrValA[start_col_index_A];
// only the very first B row resumes at the recorded position
start_col_index_B = is_last ? d_queue[queue_id + 4] : d_csrRowPtrB[reg_reuse1]; // reg_reuse1 = row_id_B
is_last = false;
stop_col_index_B = d_csrRowPtrB[reg_reuse1 + 1]; // reg_reuse1 = row_id_B
stride = stop_col_index_B - start_col_index_B;
loop = ceil(stride / local_size_value_type); //ceil((value_type)stride / (value_type)local_size);
start_col_index_B += local_id;
for (k = 0; k < loop; k++)
{
__syncthreads();
is_new_col = 0;
if (start_col_index_B < stop_col_index_B)
{
col_Ct = d_csrColIndB[start_col_index_B];
val_Ct = d_csrValB[start_col_index_B] * val_A;
// binary search on existing sorted list
// if the column is existed, add the value to the position
// else, set scan value to 1, and wait for scan
is_new_col = 1;
// search on l2
binarysearch(d_key_merged, d_val_merged, col_Ct, val_Ct, merged_size_l2, &is_new_col);
// search on l1
if (is_new_col == 1)
binarysearch(s_key_merged_l1, s_val_merged_l1, col_Ct, val_Ct, merged_size_l1, &is_new_col);
}
#if __CUDA_ARCH__ >= 300
int r_scan = scan_plus1_shfl<int>(s_scan, local_id, is_new_col, seg_num);
const int s_scan_sum = s_scan[seg_num];
#else
s_scan[local_id] = is_new_col;
__syncthreads();
// scan with half-local_size work-items
// s_scan[local_size] is the size of input non-duplicate array
scan(s_scan);
__syncthreads();
const int s_scan_sum = s_scan[local_size];
#endif
// if all elements are absorbed into merged list,
// the following work in this inner-loop is not needed any more
if (s_scan_sum == 0)
{
start_col_index_B += local_size;
continue;
}
// check if the total size is larger than the capicity of merged list
if (merged_size_l1 + s_scan_sum > c_buffsize)
{
// l1 would overflow: undo this thread's value accumulation (it will
// be redone next round), record resume state, and merge what we have
if (start_col_index_B < stop_col_index_B)
{
// rollback on l2
binarysearch_sub(d_key_merged, d_val_merged, col_Ct, val_Ct, merged_size_l2);
// rollback on l1
binarysearch_sub(s_key_merged_l1, s_val_merged_l1, col_Ct, val_Ct, merged_size_l1);
}
__syncthreads();
// write a signal to some place, not equals -1 means next round is needed
if (local_id == 0)
{
d_queue[queue_id + 2] = merged_size_l2 + merged_size_l1;
d_queue[queue_id + 3] = start_col_index_A;
d_queue[queue_id + 4] = start_col_index_B;
}
// dump l1 to global
readwrite_mergedlist(d_key_merged, d_val_merged, s_key_merged_l1, s_val_merged_l1,
merged_size_l1, merged_size_l2, 1);
__syncthreads();
// merge l2 + l1 on global
VT = ceil((merged_size_l2 + merged_size_l1) / local_size_value_type);
diag = VT * local_id;
mp = mergepath_partition(d_key_merged, merged_size_l2,
&d_key_merged[merged_size_l2], merged_size_l1, diag);
mergepath_serialmerge(d_key_merged, d_val_merged,
mp, merged_size_l2, merged_size_l2 + diag - mp, merged_size_l2 + merged_size_l1,
reg_key, reg_val, VT);
__syncthreads();
for (is = 0; is < VT; is++)
{
d_key_merged[diag + is] = reg_key[is];
d_val_merged[diag + is] = reg_val[is];
}
return;
}
// write compact input to free place in merged list
if(is_new_col)
{
#if __CUDA_ARCH__ >= 300
reg_reuse1 = merged_size_l1 + r_scan;
#else
reg_reuse1 = merged_size_l1 + s_scan[local_id];
#endif
s_key_merged_l1[reg_reuse1] = col_Ct;
s_val_merged_l1[reg_reuse1] = val_Ct;
}
__syncthreads();
// merge path partition on l1
VT = ceil((merged_size_l1 + s_scan_sum) / local_size_value_type);
diag = VT * local_id;
mp = mergepath_partition(s_key_merged_l1, merged_size_l1,
&s_key_merged_l1[merged_size_l1], s_scan_sum, diag);
mergepath_serialmerge(s_key_merged_l1, s_val_merged_l1,
mp, merged_size_l1, merged_size_l1 + diag - mp, merged_size_l1 + s_scan_sum,
reg_key, reg_val, VT);
__syncthreads();
for (is = 0; is < VT; is++)
{
s_key_merged_l1[diag + is] = reg_key[is];
s_val_merged_l1[diag + is] = reg_val[is];
}
__syncthreads();
merged_size_l1 += s_scan_sum;
start_col_index_B += local_size;
}
start_col_index_A++;
}
__syncthreads();
// row finished: publish its final nnz and mark the queue slot done (-1)
if (local_id == 0)
{
d_csrRowPtrC[row_id] = merged_size_l2 + merged_size_l1;
d_queue[queue_id + 2] = -1;
}
// dump l1 to global
readwrite_mergedlist(d_key_merged, d_val_merged, s_key_merged_l1, s_val_merged_l1,
merged_size_l1, merged_size_l2, 1);
__syncthreads();
// merge l2 + l1 on global
VT = ceil((merged_size_l2 + merged_size_l1) / local_size_value_type);
diag = VT * local_id;
mp = mergepath_partition(d_key_merged, merged_size_l2,
&d_key_merged[merged_size_l2], merged_size_l1, diag);
mergepath_serialmerge(d_key_merged, d_val_merged,
mp, merged_size_l2, merged_size_l2 + diag - mp, merged_size_l2 + merged_size_l1,
reg_key, reg_val, VT);
__syncthreads();
for (is = 0; is < VT; is++)
{
d_key_merged[diag + is] = reg_key[is];
d_val_merged[diag + is] = reg_val[is];
}
}
// Launches the merge-path ESC kernels (shared-memory EM_mergepath or
// global-memory EM_mergepath_global, selected by mergepath_location) for the
// num_blocks rows queued starting at 'position'. Afterwards it copies the
// queue back to the host, compacts any rows that overflowed their merge
// buffer (queue slot [2] != -1) to the front of the queue slice, assigns
// them fresh, larger Ct storage, and reports their count via *count_next.
// Returns BHSPARSE_SUCCESS, or -1 on a CUDA launch error.
int bhsparse_cuda::compute_nnzC_Ct_mergepath(int num_threads, int num_blocks, int j,
int mergebuffer_size, int position, int *count_next, int mergepath_location)
{
cudaError_t err = cudaSuccess;
StopWatchInterface *timer = NULL;
if (_profiling)
{
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
}
if (mergepath_location == MERGEPATH_LOCAL)
{
//cout << "doing merge with num_threads = " << num_threads << endl;
// mergebuffer_size selects the compile-time shared-memory buffer and
// scan size of the EM_mergepath instantiation
switch (mergebuffer_size)
{
case 256:
EM_mergepath<value_type, 256, 64><<< num_blocks, num_threads >>>(_d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
case 512:
EM_mergepath<value_type, 512, 128><<< num_blocks, num_threads >>>(_d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
case 1024:
EM_mergepath<value_type, 1024, 256><<< num_blocks, num_threads >>>(_d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
case 2048:
EM_mergepath<value_type, 2048, 256><<< num_blocks, num_threads >>>(_d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
case 2560:
EM_mergepath<value_type, 2560, 256><<< num_blocks, num_threads >>>(_d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
break;
}
}
else if (mergepath_location == MERGEPATH_GLOBAL)
{
//cout << "EM_mergepath_global is called." << endl;
EM_mergepath_global<value_type, 2560, 256><<< num_blocks, num_threads >>>(_d_queue_one,
_d_csrRowPtrA, _d_csrColIndA, _d_csrValA,
_d_csrRowPtrB, _d_csrColIndB, _d_csrValB,
_d_csrRowPtrC, _d_csrColIndCt, _d_csrValCt, position);
}
err = cudaGetLastError();
if (err != cudaSuccess) { cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
if (_profiling)
{
err = cudaDeviceSynchronize();
sdkStopTimer(&timer);
cout << "[ " << j << " ] EM_mergepath time: " << sdkGetTimerValue(&timer) << " ms" << endl;
}
// load d_queue back, check if there is still any row needs next level merge,
checkCudaErrors(cudaMemcpy(&_h_queue_one[TUPLE_QUEUE * position],
&_d_queue_one[TUPLE_QUEUE * position],
TUPLE_QUEUE * num_blocks * sizeof(int), cudaMemcpyDeviceToHost));
int temp_queue [6] = {0, 0, 0, 0, 0, 0};
int counter = 0;
int temp_num = 0;
// compact unfinished rows to the front of the queue slice by swapping
// their tuples with the tuple at (position + counter)
for (int i = position; i < position + num_blocks; i++)
{
// if yes, (1)malloc device mem, (2)upgrade mem address on pos1 and (3)use pos5 as last mem address
if (_h_queue_one[TUPLE_QUEUE * i + 2] != -1)
{
temp_queue[0] = _h_queue_one[TUPLE_QUEUE * i]; // row id
if (mergepath_location == MERGEPATH_LOCAL)
{
// accum = capacity of the NEXT round's merge buffer for this row
int accum = 0;
switch (mergebuffer_size)
{
case 256:
accum = 512;
break;
case 512:
accum = 1024;
break;
case 1024:
accum = 2048;
break;
case 2048:
accum = 2560;
break;
case 2560:
accum = 2560 * 2;
break;
}
temp_queue[1] = _nnzCt + counter * accum; // new start address
}
else if (mergepath_location == MERGEPATH_GLOBAL)
temp_queue[1] = _nnzCt + counter * (mergebuffer_size + 2560); // new start address
//temp_queue[1] = _nnzCt + counter * mergebuffer_size * 2; // new start address
temp_queue[2] = _h_queue_one[TUPLE_QUEUE * i + 2]; // merged size
temp_queue[3] = _h_queue_one[TUPLE_QUEUE * i + 3]; // i
temp_queue[4] = _h_queue_one[TUPLE_QUEUE * i + 4]; // k
temp_queue[5] = _h_queue_one[TUPLE_QUEUE * i + 1]; // old start address
_h_queue_one[TUPLE_QUEUE * i] = _h_queue_one[TUPLE_QUEUE * (position + counter)]; // row id
_h_queue_one[TUPLE_QUEUE * i + 1] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 1]; // new start address
_h_queue_one[TUPLE_QUEUE * i + 2] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 2]; // merged size
_h_queue_one[TUPLE_QUEUE * i + 3] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 3]; // i
_h_queue_one[TUPLE_QUEUE * i + 4] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 4]; // k
_h_queue_one[TUPLE_QUEUE * i + 5] = _h_queue_one[TUPLE_QUEUE * (position + counter) + 5]; // old start address
_h_queue_one[TUPLE_QUEUE * (position + counter)] = temp_queue[0]; // row id
_h_queue_one[TUPLE_QUEUE * (position + counter) + 1] = temp_queue[1]; // new start address
_h_queue_one[TUPLE_QUEUE * (position + counter) + 2] = temp_queue[2]; // merged size
_h_queue_one[TUPLE_QUEUE * (position + counter) + 3] = temp_queue[3]; // i
_h_queue_one[TUPLE_QUEUE * (position + counter) + 4] = temp_queue[4]; // k
_h_queue_one[TUPLE_QUEUE * (position + counter) + 5] = temp_queue[5]; // old start address
counter++;
// NOTE(review): this reads slot [2] AFTER the swap above, so it sums the
// swapped-in tuple's merged size, not the overflowed row's — and temp_num
// is never used afterwards; confirm it is dead code
temp_num += _h_queue_one[TUPLE_QUEUE * i + 2];
}
}
if (counter > 0)
{
// grow Ct storage so every unfinished row gets a larger merge buffer
//int nnzCt_new = _nnzCt + counter * mergebuffer_size * 2;
int nnzCt_new;
if (mergepath_location == MERGEPATH_LOCAL)
{
int accum = 0;
switch (mergebuffer_size)
{
case 256:
accum = 512;
break;
case 512:
accum = 1024;
break;
case 1024:
accum = 2048;
break;
case 2048:
accum = 2560;
break;
case 2560:
accum = 2560 * 2;
break;
}
nnzCt_new = _nnzCt + counter * accum; //_nnzCt + counter * mergebuffer_size * 2;
}
else if (mergepath_location == MERGEPATH_GLOBAL)
nnzCt_new = _nnzCt + counter * (mergebuffer_size + 2560);
cout << "nnzCt_new = " << nnzCt_new << endl;
// malloc new device memory
index_type *d_csrColIndCt_new;
//checkCudaErrors(cudaMalloc((void **)&d_csrColIndCt_new, nnzCt_new * sizeof(index_type)));
err = cudaMalloc((void **)&d_csrColIndCt_new, nnzCt_new * sizeof(index_type));
if (err != cudaSuccess)
{
// device allocation failed while the old buffer is still resident:
// stage the old data through host memory instead of device-to-device
//cout << "errb = " << cudaGetErrorString(err) << ". malloc extra memory." << endl;
index_type *h_csrColIndCt = (index_type *)malloc(_nnzCt * sizeof(index_type));
// copy last device mem to a temp space on host
checkCudaErrors(cudaMemcpy(h_csrColIndCt, _d_csrColIndCt, _nnzCt * sizeof(index_type), cudaMemcpyDeviceToHost));
//cout << "err1c = " << cudaGetErrorString(err) << ". ." << endl;
//err = cudaDeviceSynchronize();
// free last device mem
checkCudaErrors(cudaFree(_d_csrColIndCt));
//cout << "err2c = " << cudaGetErrorString(err) << ". ." << endl;
//err = cudaDeviceSynchronize();
checkCudaErrors(cudaMalloc((void **)&d_csrColIndCt_new, nnzCt_new * sizeof(index_type)));
//cout << "err3c = " << cudaGetErrorString(err) << ". ." << endl;
// copy data in the temp space on host to device
checkCudaErrors(cudaMemcpy(d_csrColIndCt_new, h_csrColIndCt, _nnzCt * sizeof(index_type), cudaMemcpyHostToDevice));
//cout << "err4c = " << cudaGetErrorString(err) << ". ." << endl;
free(h_csrColIndCt);
}
else
{
checkCudaErrors(cudaMemcpy(d_csrColIndCt_new, _d_csrColIndCt, _nnzCt * sizeof(index_type), cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaFree(_d_csrColIndCt));
}
_d_csrColIndCt = d_csrColIndCt_new;
value_type *d_csrValCt_new;
//checkCudaErrors(cudaMalloc((void **)&d_csrValCt_new, nnzCt_new * sizeof(value_type)));
err = cudaMalloc((void **)&d_csrValCt_new, nnzCt_new * sizeof(value_type));
if (err != cudaSuccess)
{
// same host-staged fallback for the value array
//cout << "erra = " << cudaGetErrorString(err) << ". malloc extra memory." << endl;
value_type *h_csrValCt = (value_type *)malloc(_nnzCt * sizeof(value_type));
// copy last device mem to a temp space on host
checkCudaErrors(cudaMemcpy(h_csrValCt, _d_csrValCt, _nnzCt * sizeof(value_type), cudaMemcpyDeviceToHost));
//cout << "err1v = " << cudaGetErrorString(err) << ". ." << endl;
//err = cudaDeviceSynchronize();
// free last device mem
checkCudaErrors(cudaFree(_d_csrValCt));
//cout << "err2v = " << cudaGetErrorString(err) << ". ." << endl;
//err = cudaDeviceSynchronize();
checkCudaErrors(cudaMalloc((void **)&d_csrValCt_new, nnzCt_new * sizeof(value_type)));
//cout << "err3v = " << cudaGetErrorString(err) << ". ." << endl;
// copy data in the temp space on host to device
checkCudaErrors(cudaMemcpy(d_csrValCt_new, h_csrValCt, _nnzCt * sizeof(value_type), cudaMemcpyHostToDevice));
//cout << "err4v = " << cudaGetErrorString(err) << ". ." << endl;
free(h_csrValCt);
}
else
{
// copy last device mem to current one, device to device copy
checkCudaErrors(cudaMemcpy(d_csrValCt_new, _d_csrValCt, _nnzCt * sizeof(value_type), cudaMemcpyDeviceToDevice));
// free last device mem
checkCudaErrors(cudaFree(_d_csrValCt));
}
_d_csrValCt = d_csrValCt_new;
// rewrite d_queue
checkCudaErrors(cudaMemcpy(&_d_queue_one[TUPLE_QUEUE * position],
&_h_queue_one[TUPLE_QUEUE * position],
TUPLE_QUEUE * num_blocks * sizeof(int), cudaMemcpyHostToDevice));
//cout << "seems good." << endl;
_nnzCt = nnzCt_new;
}
*count_next = counter;
return BHSPARSE_SUCCESS;
}
// Finalizes the row-pointer array of C: converts the per-row nonzero counts
// produced by the merge kernels into an exclusive prefix sum (CSR row
// pointers), then allocates and zeroes the column-index and value arrays of
// C on the device. Sets _nnzC to the total nonzero count. Returns 0.
int bhsparse_cuda::create_C()
{
int err = 0;
// Bring the per-row counts back to the host.
checkCudaErrors(cudaMemcpy(_h_csrRowPtrC, _d_csrRowPtrC, (_m + 1) * sizeof(index_type), cudaMemcpyDeviceToHost));
// In-place exclusive scan: entry i becomes the sum of counts 0..i-1.
int running = 0;
for (int i = 0; i <= _m; i++)
{
int count = _h_csrRowPtrC[i];
_h_csrRowPtrC[i] = running;
running += count;
}
_nnzC = _h_csrRowPtrC[_m];
// create device mem of C
checkCudaErrors(cudaMalloc((void **)&_d_csrColIndC, _nnzC * sizeof(index_type)));
checkCudaErrors(cudaMalloc((void **)&_d_csrValC, _nnzC * sizeof(value_type)));
checkCudaErrors(cudaMemset(_d_csrColIndC, 0, _nnzC * sizeof(index_type)));
checkCudaErrors(cudaMemset(_d_csrValC, 0, _nnzC * sizeof(value_type)));
// Push the finished row pointers back to the device.
checkCudaErrors(cudaMemcpy(_d_csrRowPtrC, _h_csrRowPtrC, (_m + 1) * sizeof(index_type), cudaMemcpyHostToDevice));
return err;
}
// Copies single-entry rows from the temporary Ct structure into the final C
// structure: one thread handles one queued row, moving exactly one
// (column, value) pair. The source offset comes from the queue tuple, not
// from d_csrRowPtrCt (kept in the signature for interface compatibility).
// 'size' is the number of queued rows; grid must cover at least 'size'
// threads.
__global__ void
copyCt2C_Single(const int* d_csrRowPtrC,
int* d_csrColIndC,
value_type* d_csrValC,
const int* d_csrRowPtrCt,
const int* d_csrColIndCt,
const value_type* d_csrValCt,
const int* d_queue,
const int size,
const int d_queue_offset)
{
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
// Early-out instead of the original predicated loads: out-of-range threads
// previously still issued global reads at index 0, wasting bandwidth on
// memory they do not own.
if (global_id >= size)
return;
int row_id = d_queue[TUPLE_QUEUE * (d_queue_offset + global_id)];
// Position of this row's single entry inside Ct, carried in the queue tuple.
int Ct_base_start = d_queue[TUPLE_QUEUE * (d_queue_offset + global_id) + 1]; //d_csrRowPtrCt[row_id];
int C_base_start = d_csrRowPtrC[row_id];
d_csrColIndC[C_base_start] = d_csrColIndCt[Ct_base_start];
d_csrValC[C_base_start] = d_csrValCt[Ct_base_start];
}
// Copies one queued row from Ct into C using one thread block per row, for
// rows that fit within a single block width: thread t copies element t.
// The Ct source offset comes from the queue tuple (d_csrRowPtrCt is kept
// only for interface compatibility).
__global__ void
copyCt2C_Loopless(const int* d_csrRowPtrC,
int* d_csrColIndC,
value_type* d_csrValC,
const int* d_csrRowPtrCt,
const int* d_csrColIndCt,
const value_type* d_csrValCt,
const int* d_queue,
const int d_queue_offset)
{
const int lane = threadIdx.x;
const int entry = TUPLE_QUEUE * (d_queue_offset + blockIdx.x);
const int row = d_queue[entry];
// Source position inside Ct is stored in slot 1 of the queue tuple.
const int src = d_queue[entry + 1] + lane; //d_csrRowPtrCt[row] + lane;
const int dst = d_csrRowPtrC[row] + lane;
// Guard against rows shorter than the block width.
if (dst < d_csrRowPtrC[row + 1])
{
d_csrColIndC[dst] = d_csrColIndCt[src];
d_csrValC[dst] = d_csrValCt[src];
}
}
// Copies one queued row from Ct into C, one thread block per row, striding
// the block across rows longer than the block width. The Ct source offset
// comes from the queue tuple (d_csrRowPtrCt kept for interface
// compatibility).
__global__ void
copyCt2C_Loop(const int* d_csrRowPtrC,
int* d_csrColIndC,
value_type* d_csrValC,
const int* d_csrRowPtrCt,
const int* d_csrColIndCt,
const value_type* d_csrValCt,
const int* d_queue,
const int d_queue_offset)
{
const int entry = TUPLE_QUEUE * (d_queue_offset + blockIdx.x);
const int row = d_queue[entry];
// Source position inside Ct is stored in slot 1 of the queue tuple.
int src = d_queue[entry + 1] + threadIdx.x; //d_csrRowPtrCt[row];
int dst = d_csrRowPtrC[row] + threadIdx.x;
const int dst_stop = d_csrRowPtrC[row + 1];
// Block-stride copy: each pass moves blockDim.x elements of the row.
while (dst < dst_stop)
{
d_csrColIndC[dst] = d_csrColIndCt[src];
d_csrValC[dst] = d_csrValCt[src];
dst += blockDim.x;
src += blockDim.x;
}
}
// Host wrapper launching copyCt2C_Single for the 'local_size' rows queued at
// 'position'. Optionally times the launch when profiling is enabled.
// Returns BHSPARSE_SUCCESS, or -1 on a CUDA error.
int bhsparse_cuda::copy_Ct_to_C_Single(int num_threads, int num_blocks, int local_size, int position)
{
StopWatchInterface *timer = NULL;
const bool timing = _profiling;
if (timing)
{
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
}
copyCt2C_Single<<< num_blocks, num_threads >>>(_d_csrRowPtrC, _d_csrColIndC, _d_csrValC,
_d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt,
_d_queue_one, local_size, position);
// A launch-configuration failure surfaces here rather than from the launch itself.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{ cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
if (timing)
{
// Synchronize only when timing, so the normal path stays asynchronous.
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
{ cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
sdkStopTimer(&timer);
}
return BHSPARSE_SUCCESS;
}
// Host wrapper launching copyCt2C_Loopless (one block per queued row, no
// inner loop) for the rows queued at 'position'. Optionally times the
// launch when profiling is enabled. Returns BHSPARSE_SUCCESS, or -1 on a
// CUDA error.
int bhsparse_cuda::copy_Ct_to_C_Loopless(int num_threads, int num_blocks, int j, int position)
{
StopWatchInterface *timer = NULL;
const bool timing = _profiling;
if (timing)
{
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
}
copyCt2C_Loopless<<< num_blocks, num_threads >>>(_d_csrRowPtrC, _d_csrColIndC, _d_csrValC,
_d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt,
_d_queue_one, position);
// A launch-configuration failure surfaces here rather than from the launch itself.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{ cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
if (timing)
{
// Synchronize only when timing, so the normal path stays asynchronous.
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
{ cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
sdkStopTimer(&timer);
}
return BHSPARSE_SUCCESS;
}
// Host wrapper launching copyCt2C_Loop (one block per queued row, block
// strides across long rows) for the rows queued at 'position'. Optionally
// times the launch when profiling is enabled. Returns BHSPARSE_SUCCESS, or
// -1 on a CUDA error.
int bhsparse_cuda::copy_Ct_to_C_Loop(int num_threads, int num_blocks, int j, int position)
{
StopWatchInterface *timer = NULL;
const bool timing = _profiling;
if (timing)
{
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
}
copyCt2C_Loop<<< num_blocks, num_threads >>>(_d_csrRowPtrC, _d_csrColIndC, _d_csrValC,
_d_csrRowPtrCt, _d_csrColIndCt, _d_csrValCt,
_d_queue_one, position);
// A launch-configuration failure surfaces here rather than from the launch itself.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{ cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
if (timing)
{
// Synchronize only when timing, so the normal path stays asynchronous.
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
{ cout << "err = " << cudaGetErrorString(err) << endl; return -1; }
sdkStopTimer(&timer);
}
return BHSPARSE_SUCCESS;
}
// Returns the number of nonzeros in C, as computed by create_C().
int bhsparse_cuda::get_nnzC()
{
return _nnzC;
}
// Copies the finished result matrix C back to the host: column indices and
// values into the caller-supplied arrays (which must hold at least _nnzC
// elements), and the row pointers into the internal host buffer. Returns 0.
int bhsparse_cuda::get_C(index_type *csrColIndC, value_type *csrValC)
{
int err = 0;
checkCudaErrors(cudaMemcpy(_h_csrRowPtrC, _d_csrRowPtrC, (_m + 1) * sizeof(index_type), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(csrColIndC, _d_csrColIndC, _nnzC * sizeof(index_type), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(csrValC, _d_csrValC, _nnzC * sizeof(value_type), cudaMemcpyDeviceToHost));
return err;
}
|
93478f9bfd432e945f378101a0e183b184f03a9b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <cassert>
#include "arrayDeleter.h"
#include "rfhaps_gpu.h"
#include <algorithm>
#include "R.h"
#include <exception>
#include "getFunnelGPU.h"
using namespace std;
/* Print a message with file/line context, reset the device, and abort.
 * Wrapped in do { } while(0) so the macro expands to a single statement and
 * is safe inside unbraced if/else bodies (the original three-statement form
 * was not). */
#define SAFE_EXIT( m )\
do {\
Rprintf("%s in file '%s' in line %i.\n", m, __FILE__, __LINE__);\
hipDeviceReset();\
exit(EXIT_FAILURE);\
} while (0)
/* Evaluate a HIP runtime call; on failure print the error string with
 * file/line context, reset the device, and abort. Fixes from the original:
 * the status variable no longer shadows the hipError_t type name (a hipify
 * artifact that would break any use of the type in the same scope), the
 * macro argument is parenthesised so expressions expand safely, and the
 * body is a do/while(0) single statement. */
#define R_CUDA_SAFE_CALL( call )\
do {\
hipError_t err_ = ( call );\
if( err_ != hipSuccess ) {\
Rprintf("%s in file '%s' in line %i.\n", hipGetErrorString(err_), __FILE__, __LINE__);\
hipDeviceReset();\
exit(EXIT_FAILURE);\
}\
} while (0)
/* Selects the GPU this process will use.
 *   deviceNum >= 0 : use that device index explicitly.
 *   deviceNum == -1: take the first available device.
 *   deviceNum == -2: round-robin over devices using the OpenMPI local rank.
 * Any other value aborts via SAFE_EXIT. Prints the device finally chosen. */
void selectGPU(int deviceNum) {
	int myDevice, numDevices;
	R_CUDA_SAFE_CALL( hipGetDeviceCount( &numDevices ) );
	// Valid explicit indices are 0 .. numDevices-1, so warn on >=.
	// (The original '>' comparison missed the deviceNum == numDevices case.)
	if (deviceNum >= numDevices && deviceNum >= 0) {
		Rprintf("Unable to use device %i, only %i found.\n",deviceNum,numDevices);
	}
	if (deviceNum >= 0) {
		// if caller specified a device then use it
		R_CUDA_SAFE_CALL( hipSetDevice(deviceNum) );
	} else if (deviceNum == -1) {
		// take the first available (will share unless GPUs are in exclusive mode)
		Rprintf("Selecting first available GPU.\n");
		R_CUDA_SAFE_CALL( hipSetDevice(0) );
	} else if (deviceNum == -2) {
		// try some smarts to round robin assign devices based on the MPI local rank
		char* cLocalRank;
		int localRank = 0;
		cLocalRank = getenv("OMPI_COMM_WORLD_LOCAL_RANK");
		if (cLocalRank!=NULL) {
			localRank = atoi(cLocalRank);
			Rprintf("Local rank is: %i.\n",localRank);
		} else {
			Rprintf("Unable to determine local rank.\n");
		}
		R_CUDA_SAFE_CALL( hipSetDevice(localRank % numDevices) );
	} else {
		SAFE_EXIT("Unknown argument to selectGPU");
	}
	// which device did we end up with..
	R_CUDA_SAFE_CALL( hipGetDevice( &myDevice ) );
	Rprintf("Using device %i.\n",myDevice);
}
template<int nFounders> __device__ void pr2pt(double r, double *prob);
// Two-point haplotype class probabilities for 4 founders at recombination
// fraction r. prob[0..2] are the three founder-pair classes; all share the
// same denominator.
template<> __device__ void pr2pt<4>(double r, double *prob)
{
	const double denom = 4 + 8 * r;
	prob[0] = (1 - r) / denom;
	prob[1] = r / denom;
	prob[2] = prob[1];
}
// Two-point haplotype class probabilities for 8 founders at recombination
// fraction r. prob[0..2] are the three founder-pair classes.
template<> __device__ void pr2pt<8>(double r, double *prob)
{
	const double omr = 1 - r;
	const double denom = 8 + 16 * r;
	prob[0] = omr * omr / denom;
	prob[1] = r * omr / denom;
	prob[2] = r / (16 + 32 * r);
}
template<int nFounders>__device__ void pr2ptirip(double r, int s, double *prob);
// Two-point haplotype class probabilities for 4 founders with s generations
// of intercrossing, at recombination fraction r.
template<> __device__ void pr2ptirip<4>(double r, int s, double *prob)
{
	const double decay = pow(1 - r, s - 1);
	prob[0] = (pow(1 - r, 2 + s - 1) / 4 + (2 * r + 1 - decay) / 16) / (1 + 2 * r);
	// Remaining classes share the leftover probability mass equally.
	prob[1] = prob[2] = (1 - 4 * prob[0]) / 12;
}
// Two-point haplotype class probabilities for 8 founders with s generations
// of intercrossing, at recombination fraction r.
template<> __device__ void pr2ptirip<8>(double r, int s, double* prob)
{
	const double decay = pow(1 - r, s - 1);
	const double omr = 1 - r;
	prob[0] = (decay * omr * omr * omr / 8 + (2 * r + 1 - decay) / 64) / (1 + 2 * r);
	// Remaining classes share the leftover probability mass equally.
	prob[1] = prob[2] = (1 - 8 * prob[0]) / 56;
}
extern __shared__ char dyn_shared_mem[]; /* dynamic allocated shared memory */
// Recombination-fraction likelihood kernel: thread (x=r, y=k) evaluates one
// (recombination fraction, marker pair) cell, summing each final
// individual's log10 haplotype-probability contribution into
// output[k*nRecomb + r]. Dynamic shared memory must hold nRecomb doubles
// (the theta values); blockDim.x must equal nRecomb so each x-lane loads
// one theta.
template<int nFounders>
__global__ void gpu_rfhaps(int nRecomb, int* ngen,
int nPairs, int nFinals,
int *finalg, int* pair1, int* pair2,
double *thvec, int* markerPatternIDs, bool* allowableMarkerPatterns, int nMarkerPatterns, double* lineWeights, double* output) {
/*
* Mask is a matrix that looks something like
* nfounders = 8 nfounders = 4
* 01222222 0122
* 10222222 1022
* 22012222 2201
* 22102222 2210
* 22220122
* 22221022
* 22222201
* 22222210
*/
__shared__ int mask[8][8];
// per-thread founder-compatibility flags for the two markers of the pair
int g1[8];
int g2[8];
/* I suppose this could be done by differently..
* by having each thread copy a portion of the mask from device memory
* seems pointless unless the mask gets very large
* I assume doing it this way just increases the code size
*/
// NOTE(review): every thread in the block writes the same constants into
// the shared mask — redundant but benign duplicate writes.
mask[0][0]=0; mask[0][1]=1; mask[0][2]=2; mask[0][3]=2; mask[0][4]=2; mask[0][5]=2; mask[0][6]=2; mask[0][7]=2;
mask[1][0]=1; mask[1][1]=0; mask[1][2]=2; mask[1][3]=2; mask[1][4]=2; mask[1][5]=2; mask[1][6]=2; mask[1][7]=2;
mask[2][0]=2; mask[2][1]=2; mask[2][2]=0; mask[2][3]=1; mask[2][4]=2; mask[2][5]=2; mask[2][6]=2; mask[2][7]=2;
mask[3][0]=2; mask[3][1]=2; mask[3][2]=1; mask[3][3]=0; mask[3][4]=2; mask[3][5]=2; mask[3][6]=2; mask[3][7]=2;
mask[4][0]=2; mask[4][1]=2; mask[4][2]=2; mask[4][3]=2; mask[4][4]=0; mask[4][5]=1; mask[4][6]=2; mask[4][7]=2;
mask[5][0]=2; mask[5][1]=2; mask[5][2]=2; mask[5][3]=2; mask[5][4]=1; mask[5][5]=0; mask[5][6]=2; mask[5][7]=2;
mask[6][0]=2; mask[6][1]=2; mask[6][2]=2; mask[6][3]=2; mask[6][4]=2; mask[6][5]=2; mask[6][6]=0; mask[6][7]=1;
mask[7][0]=2; mask[7][1]=2; mask[7][2]=2; mask[7][3]=2; mask[7][4]=2; mask[7][5]=2; mask[7][6]=1; mask[7][7]=0;
// stage the theta values in shared memory; lanes with equal threadIdx.x
// (different threadIdx.y) write the same slot with the same value
double *shm_thvec = (double*)dyn_shared_mem; /* dynamically allocated shared memory */
shm_thvec[threadIdx.x] = thvec[threadIdx.x];
__syncthreads();
/* work out which part of the r and k loops in the CPU implementation
* we are responsible for
*/
int r = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockIdx.y * blockDim.y + threadIdx.y;
if (k>=nPairs) return;
if (r>=nRecomb) return;
for (int i = 0; i < nFinals; i++) {
// assert(k >= 0 && i >= 0);
// assert(k < npairs && i < nfinals);
/* TODO something seems to be corrupt in the finalg data
* need to find the cause of these rogue indiv values */
int p1 = pair1[k];
int p2 = pair2[k];
// assert(p1 >= 0 && p2 >= 0);
// assert(p1 < nmrk);
// assert(p2 < nmrk);
for (int j=0; j<nFounders; j++)
g1[j] = g2[j] = 0;
/* point to start of genotypes for the individual */
int h1 = finalg[p1*nFinals+i];
int h2 = finalg[p2*nFinals+i];
if ((h1>0)*(h2>0)) { /* check for missing values */
double theta = shm_thvec[r];
double probclass[3];
// unpack the founder-compatibility bitmask: bit j set means this
// individual's genotype at the marker is compatible with founder j
if ((h1&1) == 1) {g1[0]=1; h1 -= 1; }
if ((h1&3) == 2) {g1[1]=1; h1 -= 2; }
if ((h1&7) == 4) {g1[2]=1; h1 -= 4; }
if ((h1&15) == 8){g1[3]=1; h1 -= 8; }
if ((h1&31) == 16){g1[4]=1; h1 -= 16;}
if ((h1&63) == 32){g1[5]=1; h1 -= 32;}
if ((h1&127) == 64){g1[6]=1; h1 -= 64;}
if ((h1&255) == 128){g1[7]=1;}
if ((h2&1) == 1) {g2[0]=1; h2 -= 1; }
if ((h2&3) == 2) {g2[1]=1; h2 -= 2; }
if ((h2&7) == 4) {g2[2]=1; h2 -= 4; }
if ((h2&15) == 8){g2[3]=1; h2 -= 8; }
if ((h2&31) == 16){g2[4]=1; h2 -= 16;}
if ((h2&63) == 32){g2[5]=1; h2 -= 32;}
if ((h2&127) == 64){g2[6]=1; h2 -= 64;}
if ((h2&255) == 128){g2[7]=1;}
/* Compute haplotype probabilities based on theta */
/* TODO template gpu_rfhaps based on ngen
* this will allow us to use 7 fewer registers
* for the case when ngen == 0
* and 2 fewer when ngen > 0
* it adds annoying complexity to the kernel invocation call portion though
*/
if(ngen[i] == 0)
{
pr2pt<nFounders>(theta, probclass);
}
else
{
pr2ptirip<nFounders>(theta, ngen[i], probclass);
}
/* Check whether progeny genotypes are compatible with parent genotypes */
/* For each combination of haplotypes which is feasible
* add the haplotype probabilities together */
double hp = 0;
for (int j=0; j<nFounders; j++){
for (int l=0; l<nFounders; l++){
if (g1[j]*g2[l]) {
hp += probclass[mask[j][l]];
}
}
}
/* log10(hp) is the individual contribution to the log-likelihood */
output[k*nRecomb+r] += (allowableMarkerPatterns[nMarkerPatterns * markerPatternIDs[p1] + markerPatternIDs[p2]] ? lineWeights[i]*log10(hp) : 0);
} // end of check for missing values
}
}
/* Plain aggregate bundling the inputs of rfhaps_gpu_internal. Pointers
 * suffixed with 'D' are passed to the kernel / used in device copies;
 * unsuffixed pointers are dereferenced on the host. */
struct rfhaps_gpu_internal_args
{
// host arrays of marker indices forming each (marker1, marker2) pair
int* pair1, *pair2;
// slice of the pair list this call processes
long pairsOffset, nPairsToCalculate;
// problem dimensions and the slice of recombination fractions to compute
int nMarkers, nFinals, recombOffset, nRecombToCalculate, nRecomb, nFounders;
// device pointers: final genotypes, per-individual intercrossing generations
int* finalsD, *nIntercrossingD;
// device pointer: all recombination fractions (offset by recombOffset at launch)
double* recombinationFractionsD;
// not used in the code visible in this file — TODO confirm purpose
bool hasAI;
// host output array accumulated into after the kernel finishes
double* output;
// bounds used to map (pair, recomb) results into the output array
int marker2RangeSize, marker1Start, marker2Start;
// passed straight to the kernel, so presumably device pointers — verify at caller
int* markerPatternIDs;
bool* allowableMarkerPatterns;
int nMarkerPatterns;
// device pointer: per-individual line weights
double* lineWeightsD;
};
// Lightweight view over pedigree columns: binds the supplied column
// pointers/reference without copying any data.
pedigreeColumns::pedigreeColumns(int* id, int* Male, int* Female, int* Observed, std::vector<std::string>& Design)
: id(id), Male(Male), Female(Female), Observed(Observed), Design(Design)
{}
/* Runs the gpu_rfhaps kernel for one slice of marker pairs and recombination
 * fractions, then accumulates the per-(pair, fraction) results into
 * args.output. Returns false if the kernel reported an error. */
__host__ bool rfhaps_gpu_internal(rfhaps_gpu_internal_args& args)
{
	assert(args.nFounders == 4 || args.nFounders == 8);
	// Per-(pair, recombination fraction) accumulator on the device, zeroed.
	const long resultSize = args.nPairsToCalculate * args.nRecombToCalculate;
	double* outputD;
	hipMalloc((void**)&outputD, resultSize*sizeof(double));
	//hipMemcpy(outputD, args.output, sizeof(double)*resultSize, hipMemcpyHostToDevice);
	hipMemset(outputD, 0, sizeof(double)*resultSize);
	//transfer pairs data
	int* pair1D, *pair2D;
	hipMalloc((int**)&pair1D, args.nPairsToCalculate * sizeof(int));
	hipMemcpy(pair1D, args.pair1 + args.pairsOffset, args.nPairsToCalculate*sizeof(int), hipMemcpyHostToDevice);
	hipMalloc((int**)&pair2D, args.nPairsToCalculate * sizeof(int));
	hipMemcpy(pair2D, args.pair2 + args.pairsOffset, args.nPairsToCalculate*sizeof(int), hipMemcpyHostToDevice);
	// x dimension indexes recombination fractions, y indexes pairs.
	int threadsX = args.nRecombToCalculate;
	int threadsY = args.nPairsToCalculate;
	// 440/threadsX is intentional integer division (logical max of 512 threads
	// per block; only 440 per multiprocessor on Fermi anyway). Clamp to at
	// least one row so the launch configuration stays valid when threadsX > 440
	// (the original produced a zero-height block, an invalid launch).
	int blockRows = 440 / threadsX;
	if (blockRows < 1) blockRows = 1;
	dim3 dimBlock(threadsX, blockRows);
	dim3 dimGrid(1, ceil((double)threadsY / (double)dimBlock.y));
	// Dynamic shared memory: one double per recombination fraction (theta cache).
	size_t dynSharedSize = sizeof(double)*args.nRecombToCalculate;
	if (args.nFounders==4)
	{
		hipLaunchKernelGGL(( gpu_rfhaps<4>), dim3(dimGrid), dim3(dimBlock), dynSharedSize, 0, args.nRecombToCalculate, args.nIntercrossingD, args.nPairsToCalculate, args.nFinals, args.finalsD, pair1D, pair2D, args.recombinationFractionsD + args.recombOffset, args.markerPatternIDs, args.allowableMarkerPatterns, args.nMarkerPatterns, args.lineWeightsD, outputD);
	}
	else if (args.nFounders==8)
	{
		hipLaunchKernelGGL(( gpu_rfhaps<8>), dim3(dimGrid), dim3(dimBlock), dynSharedSize, 0, args.nRecombToCalculate, args.nIntercrossingD, args.nPairsToCalculate, args.nFinals, args.finalsD, pair1D, pair2D, args.recombinationFractionsD + args.recombOffset, args.markerPatternIDs, args.allowableMarkerPatterns, args.nMarkerPatterns, args.lineWeightsD, outputD);
	}
	else
	{
		// Unreachable when assertions are enabled; kept as a hard failure for release builds.
		Rprintf("nFounders must have value 4 or 8\n");
		exit(-1);
	}
	bool result = true;
	hipDeviceSynchronize();
	hipError_t lastError = hipGetLastError();
	if(lastError != hipSuccess)
	{
		Rprintf("CUDA Last Error: %s\n",hipGetErrorString(lastError));
		result = false;
	}
	// Bring the results back and scatter them into the caller's output array.
	double* copiedOutput = new double[resultSize];
	hipMemcpy(copiedOutput, outputD, resultSize*sizeof(double), hipMemcpyDeviceToHost);
	for(int pairCounter = 0; pairCounter < args.nPairsToCalculate; pairCounter++)
	{
		int markerCounter2 = args.pair2[pairCounter + args.pairsOffset];
		int markerCounter1 = args.pair1[pairCounter + args.pairsOffset];
		for(int recombCounter = 0; recombCounter < args.nRecombToCalculate; recombCounter++)
		{
			//Turns out this overflows the range of a signed int
			long index = (long)(markerCounter1 - args.marker1Start)*(long)args.nRecomb*(long)args.marker2RangeSize + (long)(markerCounter2-args.marker2Start)*(long)args.nRecomb + (long)(recombCounter+args.recombOffset);
			args.output[index] += copiedOutput[(long)pairCounter*(long)args.nRecombToCalculate + (long)recombCounter];
		}
	}
	hipFree(outputD);
	hipFree(pair1D);
	hipFree(pair2D);
	delete[] copiedOutput;
	return result;
}
/* Host entry point for the GPU two-point likelihood computation (HIP build).
 * Re-encodes the finals genotype data into founder-compatibility bit masks,
 * copies all inputs to the device, and accumulates, for every marker pair in
 * the requested window and every recombination fraction, the log10-likelihood
 * contributions into args.output. Work is split across several kernel launches
 * when the pair count exceeds the 65535 grid-dimension limit.
 * Returns true on success, false if any allocation or kernel call failed; all
 * host and device buffers allocated so far are released on every exit path. */
extern "C" __host__ bool rfhaps_gpu(rfhaps_gpu_args& args)
{
    selectGPU(args.deviceNum);
    int marker2RangeSize = args.marker2End - args.marker2Start, marker1RangeSize = args.marker1End - args.marker1Start;
    int nMarkers = args.markerPatternIDs.size();
    int nMarkerPatterns = args.markerEncodings.size();
    const int finalsSize = nMarkers * args.nFinals;
    int* copiedFinals = new int[finalsSize];
    assert(args.nFounders == 4 || args.nFounders == 8);
    //working out the number of pairs is complicated because if we have a region on the diagonal we use the symmetry to avoid making double calculations. Whereas if we have a bit on the
    //off-diagonal we need to calculate every value
    int maxStart = ::max(args.marker1Start, args.marker2Start);
    int minEnd = ::min(args.marker1End, args.marker2End);
    long square = ::max(minEnd - maxStart, 0);
    long squarePairs = square*(square + 1) /2;
    //cast before multiplying: marker1RangeSize * marker2RangeSize can overflow int for large marker windows
    long nPairs = (long)marker1RangeSize * (long)marker2RangeSize - square * square + squarePairs;
    //re-encode finals genetic data, so that now the 1st bit says whether that individual is compatible with founder 1, 2nd bit compatible with founder 2, etc
    for(int individualCounter = 0; individualCounter < args.nFinals; individualCounter++)
    {
        int funnel[8];
        funnelID currentIndividualFunnelID = args.funnelIDs[individualCounter];
        funnelEncoding enc = args.funnelEncodings[currentIndividualFunnelID];
        //each founder's position within the funnel is packed into 3 bits of the encoding
        for(int founderCounter = 0; founderCounter < args.nFounders; founderCounter++)
        {
            funnel[founderCounter] = ((enc & (7 << (3*founderCounter))) >> (3*founderCounter));
        }
        for(int markerCounter = 0; markerCounter < nMarkers; markerCounter++)
        {
            int newValue = 0;
            int oldValue = args.finals[individualCounter+args.nFinals*markerCounter];
            for(int founderCounter = 0; founderCounter < args.nFounders; founderCounter++)
            {
                if(oldValue == args.founders[funnel[founderCounter] + args.nFounders*markerCounter]) newValue += (1 << founderCounter);
            }
            copiedFinals[individualCounter+args.nFinals*markerCounter] = newValue;
        }
    }
    //transfer intercrossing data
    int* nIntercrossingD;
    hipError_t cudaAllocResult = hipMalloc((void**)&nIntercrossingD, args.nFinals * sizeof(int));
    if(cudaAllocResult != hipSuccess)
    {
        //%zu: the byte counts below are size_t; printing them with %d was undefined behaviour on 64-bit builds
        Rprintf("Error calling hipMalloc with %zu bytes: %s\n", args.nFinals * sizeof(int), hipGetErrorString(cudaAllocResult));
        delete[] copiedFinals;
        return false;
    }
    else
    {
        Rprintf("Allocated %zu bytes\n", args.nFinals * sizeof(int));
    }
    hipMemcpy(nIntercrossingD, args.nIntercrossing, args.nFinals * sizeof(int), hipMemcpyHostToDevice);
    int* pair1 = new int[nPairs], *pair2 = new int[nPairs];
    int* p1Ptr = pair1, *p2Ptr = pair2;
    //generate pairs; inside the symmetric (diagonal) region only keep j >= i
    for(int i = args.marker1Start; i < args.marker1End; i++)
    {
        for(int j = args.marker2Start; j < args.marker2End; j++)
        {
            if(i >= maxStart && i < minEnd && j >= maxStart && j < minEnd && j < i) continue;
            *p2Ptr = j;
            *p1Ptr = i;
            p1Ptr++; p2Ptr++;
        }
    }
    //copy across final genetic data
    int* finalsD;
    cudaAllocResult = hipMalloc((void**)&finalsD, finalsSize * sizeof(int));
    if(cudaAllocResult != hipSuccess)
    {
        Rprintf("Error calling hipMalloc with %zu bytes: %s\n", finalsSize * sizeof(int), hipGetErrorString(cudaAllocResult));
        delete[] copiedFinals;
        delete[] pair1;
        delete[] pair2;
        hipFree(nIntercrossingD);
        return false;
    }
    else
    {
        Rprintf("Allocated %zu bytes\n", finalsSize * sizeof(int));
    }
    hipMemcpy(finalsD, copiedFinals, finalsSize * sizeof(int), hipMemcpyHostToDevice);
    delete[] copiedFinals;
    //copy across recombination fractions
    double* recombinationFractionsD;
    cudaAllocResult = hipMalloc((void**)&recombinationFractionsD, args.nRecomb * sizeof(double));
    if(cudaAllocResult != hipSuccess)
    {
        Rprintf("Error calling hipMalloc with %zu bytes: %s\n", args.nRecomb * sizeof(double), hipGetErrorString(cudaAllocResult));
        delete[] pair1;
        delete[] pair2;
        hipFree(nIntercrossingD);
        hipFree(finalsD);
        return false;
    }
    else
    {
        Rprintf("Allocated %zu bytes\n", args.nRecomb * sizeof(double));
    }
    hipMemcpy(recombinationFractionsD, args.recombination, args.nRecomb * sizeof(double), hipMemcpyHostToDevice);
    //copy across the allowable marker patterns data
    bool* allowableMarkerPatternsD;
    cudaAllocResult = hipMalloc((void**)&allowableMarkerPatternsD, nMarkerPatterns * nMarkerPatterns * sizeof(bool));
    if(cudaAllocResult != hipSuccess)
    {
        Rprintf("Error calling hipMalloc with %zu bytes: %s\n", nMarkerPatterns * nMarkerPatterns*sizeof(bool), hipGetErrorString(cudaAllocResult));
        delete[] pair1;
        delete[] pair2;
        hipFree(nIntercrossingD);
        hipFree(finalsD);
        hipFree(recombinationFractionsD);
        return false;
    }
    else
    {
        Rprintf("Allocated %zu bytes\n", nMarkerPatterns * nMarkerPatterns * sizeof(bool));
    }
    hipMemcpy(allowableMarkerPatternsD, args.allowableMarkerPatterns, nMarkerPatterns * nMarkerPatterns * sizeof(bool), hipMemcpyHostToDevice);
    //copy across marker pattern IDs (this allocation was previously unchecked)
    int* markerPatternIDsD;
    cudaAllocResult = hipMalloc((void**)&markerPatternIDsD, args.markerPatternIDs.size() * sizeof(int));
    if(cudaAllocResult != hipSuccess)
    {
        Rprintf("Error calling hipMalloc with %zu bytes: %s\n", args.markerPatternIDs.size() * sizeof(int), hipGetErrorString(cudaAllocResult));
        delete[] pair1;
        delete[] pair2;
        hipFree(nIntercrossingD);
        hipFree(finalsD);
        hipFree(recombinationFractionsD);
        hipFree(allowableMarkerPatternsD);
        return false;
    }
    hipMemcpy(markerPatternIDsD, &(args.markerPatternIDs[0]), args.markerPatternIDs.size() * sizeof(int), hipMemcpyHostToDevice);
    //copy across line weights data (this allocation was previously unchecked)
    double* lineWeightsD;
    cudaAllocResult = hipMalloc((void**)&lineWeightsD, args.lineWeights.size() * sizeof(double));
    if(cudaAllocResult != hipSuccess)
    {
        Rprintf("Error calling hipMalloc with %zu bytes: %s\n", args.lineWeights.size() * sizeof(double), hipGetErrorString(cudaAllocResult));
        delete[] pair1;
        delete[] pair2;
        hipFree(nIntercrossingD);
        hipFree(finalsD);
        hipFree(recombinationFractionsD);
        hipFree(allowableMarkerPatternsD);
        hipFree(markerPatternIDsD);
        return false;
    }
    hipMemcpy(lineWeightsD, &(args.lineWeights[0]), args.lineWeights.size() * sizeof(double), hipMemcpyHostToDevice);
    int threadsX = args.nRecomb;
    int threadsY = nPairs;
    //guarantee dimBlock.y >= 1: 440/threadsX is zero once nRecomb > 440, which previously gave a zero-sized block and a division by zero below
    dim3 dimBlock(threadsX, ::max(440/threadsX, 1)); // logical max of 512 threads per block (only 440 per multiprocessor on Fermi anyway)
    dim3 dimGrid(1, ceil((double)threadsY / (double)dimBlock.y));
    int donePairs = 0;
    long pairsPerCall = nPairs;
    //grid y dimension is capped at 65535, so process the pairs in batches
    if(dimGrid.y > 65535)
    {
        pairsPerCall = 65535 * dimBlock.y;
        dimGrid.y = pairsPerCall/dimBlock.y;
        threadsY = pairsPerCall;
        Rprintf("Splitting into %ld cuda calls....\n", (long)((nPairs+pairsPerCall-1)/pairsPerCall));
    }
    int requiredThreads = threadsX*threadsY;
    int totalThreads = dimBlock.x*dimGrid.x*dimBlock.y*dimGrid.y;
    Rprintf("Total threads needed = %d\n",requiredThreads);
    Rprintf("Threads in grid = %d\n",totalThreads);
    Rprintf("Surplus threads = %d\n\n", totalThreads - requiredThreads); /* these will need to just sit idle */
    Rprintf("Threads per block %d x %d = %d\n",dimBlock.x,dimBlock.y,dimBlock.x*dimBlock.y);
    Rprintf("Blocks in grid %d x %d = %d\n",dimGrid.x,dimGrid.y,dimGrid.x*dimGrid.y);
    //END_DEBUG
    //batch parameters shared by every call; only the pair window changes per iteration
    rfhaps_gpu_internal_args internal_args;
    internal_args.pair1 = pair1;
    internal_args.pair2 = pair2;
    internal_args.nMarkers = nMarkers;
    internal_args.nFinals = args.nFinals;
    internal_args.recombOffset = 0;
    internal_args.nRecombToCalculate = args.nRecomb;
    internal_args.nRecomb = args.nRecomb;
    internal_args.finalsD = finalsD;
    internal_args.nIntercrossingD = nIntercrossingD;
    internal_args.recombinationFractionsD = recombinationFractionsD;
    internal_args.hasAI = args.hasAI;
    internal_args.nFounders = args.nFounders;
    internal_args.output = args.output;
    internal_args.marker2RangeSize = marker2RangeSize;
    internal_args.marker1Start = args.marker1Start;
    internal_args.marker2Start = args.marker2Start;
    internal_args.markerPatternIDs = markerPatternIDsD;
    internal_args.allowableMarkerPatterns = allowableMarkerPatternsD;
    internal_args.nMarkerPatterns = nMarkerPatterns;
    internal_args.lineWeightsD = lineWeightsD;
    int counter = 0;
    while(donePairs < nPairs)
    {
        Rprintf("Making cuda call %d\n", counter+1);
        internal_args.pairsOffset = donePairs;
        if(donePairs + pairsPerCall >= nPairs)
        {
            internal_args.nPairsToCalculate = nPairs - donePairs;
        }
        else internal_args.nPairsToCalculate = pairsPerCall;
        bool result = rfhaps_gpu_internal(internal_args);
        if(!result)
        {
            Rprintf("A CUDA call failed, exiting...\n");
            //release everything; previously pair1/pair2 and all device buffers leaked on this path
            delete[] pair1;
            delete[] pair2;
            hipFree(finalsD);
            hipFree(lineWeightsD);
            hipFree(nIntercrossingD);
            hipFree(recombinationFractionsD);
            hipFree(allowableMarkerPatternsD);
            hipFree(markerPatternIDsD);
            return false;
        }
        donePairs += internal_args.nPairsToCalculate;
        counter++;
    }
    delete[] pair1;
    delete[] pair2;
    Rprintf("Finished all CUDA calls\n");
    hipFree(finalsD);
    hipFree(lineWeightsD);
    hipFree(nIntercrossingD);
    hipFree(recombinationFractionsD);
    //these two were previously never freed
    hipFree(allowableMarkerPatternsD);
    hipFree(markerPatternIDsD);
    return true;
}
| 93478f9bfd432e945f378101a0e183b184f03a9b.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#include <cassert>
#include "arrayDeleter.h"
#include "rfhaps_gpu.h"
#include <algorithm>
#include "R.h"
#include <exception>
#include "getFunnelGPU.h"
using namespace std;
//Print a message with source location, reset the device and abort the process.
//NOTE(review): multi-statement macro without a do{...}while(0) guard - unsafe
//inside an unbraced if/else; confirm all call sites use it as a full statement.
#define SAFE_EXIT( m )\
Rprintf("%s in file '%s' in line %i.\n", m, __FILE__, __LINE__);\
cudaDeviceReset();\
exit(EXIT_FAILURE);
//Wrap a CUDA runtime call: on any error report the error string with file and
//line, reset the device and terminate.
#define R_CUDA_SAFE_CALL( call )\
{\
cudaError_t cudaError = call ;\
if( cudaError != cudaSuccess ) {\
Rprintf("%s in file '%s' in line %i.\n", cudaGetErrorString(cudaError), __FILE__, __LINE__);\
cudaDeviceReset();\
exit(EXIT_FAILURE);\
}\
}
/* Selects the CUDA device this process will use.
 *  deviceNum >= 0 : use that device explicitly.
 *  deviceNum == -1: take the first available device (device 0).
 *  deviceNum == -2: round-robin over devices using the OpenMPI local rank
 *                   (environment variable OMPI_COMM_WORLD_LOCAL_RANK).
 * Any CUDA failure aborts the process via R_CUDA_SAFE_CALL / SAFE_EXIT. */
void selectGPU(int deviceNum) {
    int myDevice, numDevices;
    R_CUDA_SAFE_CALL( cudaGetDeviceCount( &numDevices ) );
    //valid device indices are 0 .. numDevices-1, so deviceNum == numDevices is
    //also out of range (the original test used '>' and missed that case)
    if (deviceNum >= numDevices) {
        Rprintf("Unable to use device %i, only %i found.\n",deviceNum,numDevices);
    }
    if (deviceNum >= 0) {
        // if caller specified a device then use it
        R_CUDA_SAFE_CALL( cudaSetDevice(deviceNum) );
    } else if (deviceNum == -1) {
        // take the first available (will share unless GPUs are in exclusive mode)
        Rprintf("Selecting first available GPU.\n");
        R_CUDA_SAFE_CALL( cudaSetDevice(0) );
    } else if (deviceNum == -2) {
        // try some smarts to round robin assign devices based on the MPI local rank
        char* cLocalRank;
        int localRank = 0;
        cLocalRank = getenv("OMPI_COMM_WORLD_LOCAL_RANK");
        if (cLocalRank!=NULL) {
            localRank = atoi(cLocalRank);
            Rprintf("Local rank is: %i.\n",localRank);
        } else {
            //not running under OpenMPI (or variable unset): fall back to rank 0
            Rprintf("Unable to determine local rank.\n");
        }
        R_CUDA_SAFE_CALL( cudaSetDevice(localRank % numDevices) );
    } else {
        SAFE_EXIT("Unknown argument to selectGPU");
    }
    // which device did we end up with..
    R_CUDA_SAFE_CALL( cudaGetDevice( &myDevice ) );
    Rprintf("Using device %i.\n",myDevice);
}
/* Two-point haplotype class probabilities at recombination fraction r when the
 * individual has no intercrossing generations (ngen == 0 in gpu_rfhaps).
 * prob[0..2] are indexed through the mask matrix in gpu_rfhaps:
 * 0 = diagonal (same founder), 1 = the paired founder, 2 = any other founder.
 * NOTE(review): the genetic derivation of these constants is not visible in
 * this file; formulas are taken on trust from the original implementation. */
template<int nFounders> __device__ void pr2pt(double r, double *prob);
template<> __device__ void pr2pt<4>(double r, double *prob)
{
prob[0] = (1-r)/(4+8*r);
prob[1] = r/(4+8*r);
prob[2] = r/(4+8*r);
}
template<> __device__ void pr2pt<8>(double r, double *prob)
{
prob[0] = (1-r)*(1-r)/(8+16*r);
prob[1] = r*(1-r)/(8+16*r);
prob[2] = r/(16+32*r);
}
/* As pr2pt, but for individuals with s generations of intercrossing
 * (ngen > 0 in gpu_rfhaps); the pow(1-r, s-1) factor depends on s.
 * NOTE(review): formulas reproduced as-is; derivation not visible here. */
template<int nFounders>__device__ void pr2ptirip(double r, int s, double *prob);
template<> __device__ void pr2ptirip<4>(double r, int s, double *prob)
{
prob[0]=(pow(1-r, 2+s-1)/4+(2*r+1-pow(1-r, s-1))/16)/(1+2*r);
//the three class probabilities are constrained: 4*prob[0] + 12*prob[1] == 1
prob[1]=prob[2]=(1-4*prob[0])/12;
}
template<> __device__ void pr2ptirip<8>(double r, int s, double* prob)
{
double tmp = pow(1-r, s-1);
prob[0] = (tmp *(1-r)*(1-r)*(1-r)/8 + (2*r + 1 - tmp)/64)/(1 + 2*r);
//constraint for eight founders: 8*prob[0] + 56*prob[1] == 1
prob[1] = prob[2] = (1 - 8 * prob[0]) / 56;
}
extern __shared__ char dyn_shared_mem[]; /* dynamic allocated shared memory */
/* Kernel: accumulates, for every (marker pair k, recombination fraction r),
 * each individual's weighted log10-likelihood contribution into
 * output[k * nRecomb + r].
 * Launch contract (see rfhaps_gpu_internal): the x dimension indexes the
 * recombination fractions with gridDim.x == 1 and blockDim.x == nRecomb -
 * shm_thvec below is filled using threadIdx.x alone, so a different layout
 * would leave part of the cache unwritten; the y dimension indexes the marker
 * pairs. Dynamic shared memory must hold nRecomb doubles. */
template<int nFounders>
__global__ void gpu_rfhaps(int nRecomb, int* ngen,
int nPairs, int nFinals,
int *finalg, int* pair1, int* pair2,
double *thvec, int* markerPatternIDs, bool* allowableMarkerPatterns, int nMarkerPatterns, double* lineWeights, double* output) {
/*
* Mask is a matrix that looks something like
* nfounders = 8 nfounders = 4
* 01222222 0122
* 10222222 1022
* 22012222 2201
* 22102222 2210
* 22220122
* 22221022
* 22222201
* 22222210
*/
__shared__ int mask[8][8];
int g1[8];
int g2[8];
/* I suppose this could be done by differently..
* by having each thread copy a portion of the mask from device memory
* seems pointless unless the mask gets very large
* I assume doing it this way just increases the code size
*/
//every thread redundantly writes the same values, so no race despite no sync before this
mask[0][0]=0; mask[0][1]=1; mask[0][2]=2; mask[0][3]=2; mask[0][4]=2; mask[0][5]=2; mask[0][6]=2; mask[0][7]=2;
mask[1][0]=1; mask[1][1]=0; mask[1][2]=2; mask[1][3]=2; mask[1][4]=2; mask[1][5]=2; mask[1][6]=2; mask[1][7]=2;
mask[2][0]=2; mask[2][1]=2; mask[2][2]=0; mask[2][3]=1; mask[2][4]=2; mask[2][5]=2; mask[2][6]=2; mask[2][7]=2;
mask[3][0]=2; mask[3][1]=2; mask[3][2]=1; mask[3][3]=0; mask[3][4]=2; mask[3][5]=2; mask[3][6]=2; mask[3][7]=2;
mask[4][0]=2; mask[4][1]=2; mask[4][2]=2; mask[4][3]=2; mask[4][4]=0; mask[4][5]=1; mask[4][6]=2; mask[4][7]=2;
mask[5][0]=2; mask[5][1]=2; mask[5][2]=2; mask[5][3]=2; mask[5][4]=1; mask[5][5]=0; mask[5][6]=2; mask[5][7]=2;
mask[6][0]=2; mask[6][1]=2; mask[6][2]=2; mask[6][3]=2; mask[6][4]=2; mask[6][5]=2; mask[6][6]=0; mask[6][7]=1;
mask[7][0]=2; mask[7][1]=2; mask[7][2]=2; mask[7][3]=2; mask[7][4]=2; mask[7][5]=2; mask[7][6]=1; mask[7][7]=0;
//cache the recombination fractions in dynamic shared memory, one per x-lane
double *shm_thvec = (double*)dyn_shared_mem; /* dynamically allocated shared memory */
shm_thvec[threadIdx.x] = thvec[threadIdx.x];
__syncthreads();
/* work out which part of the r and k loops in the CPU implementation
* we are responsible for
*/
int r = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockIdx.y * blockDim.y + threadIdx.y;
//surplus threads in a partially-filled grid do nothing
if (k>=nPairs) return;
if (r>=nRecomb) return;
for (int i = 0; i < nFinals; i++) {
// assert(k >= 0 && i >= 0);
// assert(k < npairs && i < nfinals);
/* TODO something seems to be corrupt in the finalg data
* need to find the cause of these rogue indiv values */
int p1 = pair1[k];
int p2 = pair2[k];
// assert(p1 >= 0 && p2 >= 0);
// assert(p1 < nmrk);
// assert(p2 < nmrk);
for (int j=0; j<nFounders; j++)
g1[j] = g2[j] = 0;
/* point to start of genotypes for the individual */
int h1 = finalg[p1*nFinals+i];
int h2 = finalg[p2*nFinals+i];
if ((h1>0)*(h2>0)) { /* check for missing values */
double theta = shm_thvec[r];
double probclass[3];
/* unpack the founder-compatibility bit masks (set up in rfhaps_gpu) into
* per-founder indicator arrays g1/g2 */
if ((h1&1) == 1) {g1[0]=1; h1 -= 1; }
if ((h1&3) == 2) {g1[1]=1; h1 -= 2; }
if ((h1&7) == 4) {g1[2]=1; h1 -= 4; }
if ((h1&15) == 8){g1[3]=1; h1 -= 8; }
if ((h1&31) == 16){g1[4]=1; h1 -= 16;}
if ((h1&63) == 32){g1[5]=1; h1 -= 32;}
if ((h1&127) == 64){g1[6]=1; h1 -= 64;}
if ((h1&255) == 128){g1[7]=1;}
if ((h2&1) == 1) {g2[0]=1; h2 -= 1; }
if ((h2&3) == 2) {g2[1]=1; h2 -= 2; }
if ((h2&7) == 4) {g2[2]=1; h2 -= 4; }
if ((h2&15) == 8){g2[3]=1; h2 -= 8; }
if ((h2&31) == 16){g2[4]=1; h2 -= 16;}
if ((h2&63) == 32){g2[5]=1; h2 -= 32;}
if ((h2&127) == 64){g2[6]=1; h2 -= 64;}
if ((h2&255) == 128){g2[7]=1;}
/* Compute haplotype probabilities based on theta */
/* TODO template gpu_rfhaps based on ngen
* this will allow us to use 7 fewer registers
* for the case when ngen == 0
* and 2 fewer when ngen > 0
* it adds annoying complexity to the kernel invocation call portion though
*/
if(ngen[i] == 0)
{
pr2pt<nFounders>(theta, probclass);
}
else
{
pr2ptirip<nFounders>(theta, ngen[i], probclass);
}
/* Check whether progeny genotypes are compatible with parent genotypes */
/* For each combination of haplotypes which is feasible
* add the haplotype probabilities together */
double hp = 0;
for (int j=0; j<nFounders; j++){
for (int l=0; l<nFounders; l++){
if (g1[j]*g2[l]) {
hp += probclass[mask[j][l]];
}
}
}
/* log10(hp) is the individual contribution to the log-likelihood */
//contribution is suppressed entirely when the marker pattern pair is not allowable
output[k*nRecomb+r] += (allowableMarkerPatterns[nMarkerPatterns * markerPatternIDs[p1] + markerPatternIDs[p2]] ? lineWeights[i]*log10(hp) : 0);
} // end of check for missing values
}
}
/* Parameters for one batch processed by rfhaps_gpu_internal. Pointers with a
 * D suffix live in device memory; the rest are host data and sizes. */
struct rfhaps_gpu_internal_args
{
//host-side marker-pair index arrays; the current batch starts at pairsOffset
int* pair1, *pair2;
long pairsOffset, nPairsToCalculate;
//recombOffset / nRecombToCalculate select the slice of the nRecomb fractions to process
int nMarkers, nFinals, recombOffset, nRecombToCalculate, nRecomb, nFounders;
int* finalsD, *nIntercrossingD;
double* recombinationFractionsD;
bool hasAI;
//host output array; indexed by (marker1, marker2, recombination fraction)
double* output;
int marker2RangeSize, marker1Start, marker2Start;
//device pointers despite the missing D suffix: set from markerPatternIDsD /
//allowableMarkerPatternsD in rfhaps_gpu
int* markerPatternIDs;
bool* allowableMarkerPatterns;
int nMarkerPatterns;
double* lineWeightsD;
};
//Aggregates pointers/references to the pedigree columns; does not copy or take
//ownership of the underlying arrays, which must outlive this object.
pedigreeColumns::pedigreeColumns(int* id, int* Male, int* Female, int* Observed, std::vector<std::string>& Design)
: id(id), Male(Male), Female(Female), Observed(Observed), Design(Design)
{}
/* Runs one batch of the pairwise likelihood computation on the device.
 * Launches gpu_rfhaps over a (recombination fraction) x (marker pair) grid,
 * copies the per-(pair, fraction) sums back to the host and accumulates them
 * into args.output at the batch's offset.
 * Returns false if the allocation, kernel launch or execution failed. */
__host__ bool rfhaps_gpu_internal(rfhaps_gpu_internal_args& args)
{
    assert(args.nFounders == 4 || args.nFounders == 8);
    const long resultSize = args.nPairsToCalculate * args.nRecombToCalculate;
    //device accumulator: one double per (pair, recombination fraction); this
    //can be large, so the allocation is checked (it previously was not)
    double* outputD;
    cudaError_t allocResult = cudaMalloc((void**)&outputD, resultSize*sizeof(double));
    if(allocResult != cudaSuccess)
    {
        Rprintf("Error calling cudaMalloc with %zu bytes: %s\n", resultSize*sizeof(double), cudaGetErrorString(allocResult));
        return false;
    }
    cudaMemset(outputD, 0, sizeof(double)*resultSize);
    //transfer pairs data
    int* pair1D, *pair2D;
    cudaMalloc((void**)&pair1D, args.nPairsToCalculate * sizeof(int));
    cudaMemcpy(pair1D, args.pair1 + args.pairsOffset, args.nPairsToCalculate*sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&pair2D, args.nPairsToCalculate * sizeof(int));
    cudaMemcpy(pair2D, args.pair2 + args.pairsOffset, args.nPairsToCalculate*sizeof(int), cudaMemcpyHostToDevice);
    int threadsX = args.nRecombToCalculate;
    int threadsY = args.nPairsToCalculate;
    //guarantee dimBlock.y >= 1: 440/threadsX is zero once nRecombToCalculate
    //exceeds 440, which previously produced a zero-sized block dimension
    dim3 dimBlock(threadsX, std::max(440/threadsX, 1)); // logical max of 512 threads per block (only 440 per multiprocessor on Fermi anyway)
    dim3 dimGrid(1, ceil((double)threadsY / (double)dimBlock.y));
    //the kernel caches the recombination fractions in dynamic shared memory
    size_t dynSharedSize = sizeof(double)*args.nRecombToCalculate;
    if (args.nFounders==4)
    {
        gpu_rfhaps<4><<<dimGrid, dimBlock, dynSharedSize>>>(args.nRecombToCalculate, args.nIntercrossingD, args.nPairsToCalculate, args.nFinals, args.finalsD, pair1D, pair2D, args.recombinationFractionsD + args.recombOffset, args.markerPatternIDs, args.allowableMarkerPatterns, args.nMarkerPatterns, args.lineWeightsD, outputD);
    }
    else if (args.nFounders==8)
    {
        gpu_rfhaps<8><<<dimGrid, dimBlock, dynSharedSize>>>(args.nRecombToCalculate, args.nIntercrossingD, args.nPairsToCalculate, args.nFinals, args.finalsD, pair1D, pair2D, args.recombinationFractionsD + args.recombOffset, args.markerPatternIDs, args.allowableMarkerPatterns, args.nMarkerPatterns, args.lineWeightsD, outputD);
    }
    else
    {
        Rprintf("nFounders must have value 4 or 8\n");
        exit(-1);
    }
    bool result = true;
    //cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    //supported equivalent and also surfaces asynchronous kernel errors
    cudaDeviceSynchronize();
    cudaError_t lastError = cudaGetLastError();
    if(lastError != cudaSuccess)
    {
        Rprintf("CUDA Last Error: %s\n",cudaGetErrorString(lastError));
        result = false;
    }
    double* copiedOutput = new double[resultSize];
    cudaMemcpy(copiedOutput, outputD, resultSize*sizeof(double), cudaMemcpyDeviceToHost);
    for(int pairCounter = 0; pairCounter < args.nPairsToCalculate; pairCounter++)
    {
        int markerCounter2 = args.pair2[pairCounter + args.pairsOffset];
        int markerCounter1 = args.pair1[pairCounter + args.pairsOffset];
        for(int recombCounter = 0; recombCounter < args.nRecombToCalculate; recombCounter++)
        {
            //Turns out this overflows the range of a signed int, hence the long arithmetic
            long index = (long)(markerCounter1 - args.marker1Start)*(long)args.nRecomb*(long)args.marker2RangeSize + (long)(markerCounter2-args.marker2Start)*(long)args.nRecomb + (long)(recombCounter+args.recombOffset);
            args.output[index] += copiedOutput[(long)pairCounter*(long)args.nRecombToCalculate + (long)recombCounter];
        }
    }
    cudaFree(outputD);
    cudaFree(pair1D);
    cudaFree(pair2D);
    delete[] copiedOutput;
    return result;
}
/* Host entry point for the GPU two-point likelihood computation.
 * Re-encodes the finals genotype data into founder-compatibility bit masks,
 * copies all inputs to the device, and accumulates, for every marker pair in
 * the requested window and every recombination fraction, the log10-likelihood
 * contributions into args.output. Work is split across several kernel launches
 * when the pair count exceeds the 65535 grid-dimension limit.
 * Returns true on success, false if any allocation or kernel call failed; all
 * host and device buffers allocated so far are released on every exit path. */
extern "C" __host__ bool rfhaps_gpu(rfhaps_gpu_args& args)
{
    selectGPU(args.deviceNum);
    int marker2RangeSize = args.marker2End - args.marker2Start, marker1RangeSize = args.marker1End - args.marker1Start;
    int nMarkers = args.markerPatternIDs.size();
    int nMarkerPatterns = args.markerEncodings.size();
    const int finalsSize = nMarkers * args.nFinals;
    int* copiedFinals = new int[finalsSize];
    assert(args.nFounders == 4 || args.nFounders == 8);
    //working out the number of pairs is complicated because if we have a region on the diagonal we use the symmetry to avoid making double calculations. Whereas if we have a bit on the
    //off-diagonal we need to calculate every value
    int maxStart = std::max(args.marker1Start, args.marker2Start);
    int minEnd = std::min(args.marker1End, args.marker2End);
    long square = std::max(minEnd - maxStart, 0);
    long squarePairs = square*(square + 1) /2;
    //cast before multiplying: marker1RangeSize * marker2RangeSize can overflow int for large marker windows
    long nPairs = (long)marker1RangeSize * (long)marker2RangeSize - square * square + squarePairs;
    //re-encode finals genetic data, so that now the 1st bit says whether that individual is compatible with founder 1, 2nd bit compatible with founder 2, etc
    for(int individualCounter = 0; individualCounter < args.nFinals; individualCounter++)
    {
        int funnel[8];
        funnelID currentIndividualFunnelID = args.funnelIDs[individualCounter];
        funnelEncoding enc = args.funnelEncodings[currentIndividualFunnelID];
        //each founder's position within the funnel is packed into 3 bits of the encoding
        for(int founderCounter = 0; founderCounter < args.nFounders; founderCounter++)
        {
            funnel[founderCounter] = ((enc & (7 << (3*founderCounter))) >> (3*founderCounter));
        }
        for(int markerCounter = 0; markerCounter < nMarkers; markerCounter++)
        {
            int newValue = 0;
            int oldValue = args.finals[individualCounter+args.nFinals*markerCounter];
            for(int founderCounter = 0; founderCounter < args.nFounders; founderCounter++)
            {
                if(oldValue == args.founders[funnel[founderCounter] + args.nFounders*markerCounter]) newValue += (1 << founderCounter);
            }
            copiedFinals[individualCounter+args.nFinals*markerCounter] = newValue;
        }
    }
    //transfer intercrossing data
    int* nIntercrossingD;
    cudaError_t cudaAllocResult = cudaMalloc((void**)&nIntercrossingD, args.nFinals * sizeof(int));
    if(cudaAllocResult != cudaSuccess)
    {
        //%zu: the byte counts below are size_t; printing them with %d was undefined behaviour on 64-bit builds
        Rprintf("Error calling cudaMalloc with %zu bytes: %s\n", args.nFinals * sizeof(int), cudaGetErrorString(cudaAllocResult));
        delete[] copiedFinals;
        return false;
    }
    else
    {
        Rprintf("Allocated %zu bytes\n", args.nFinals * sizeof(int));
    }
    cudaMemcpy(nIntercrossingD, args.nIntercrossing, args.nFinals * sizeof(int), cudaMemcpyHostToDevice);
    int* pair1 = new int[nPairs], *pair2 = new int[nPairs];
    int* p1Ptr = pair1, *p2Ptr = pair2;
    //generate pairs; inside the symmetric (diagonal) region only keep j >= i
    for(int i = args.marker1Start; i < args.marker1End; i++)
    {
        for(int j = args.marker2Start; j < args.marker2End; j++)
        {
            if(i >= maxStart && i < minEnd && j >= maxStart && j < minEnd && j < i) continue;
            *p2Ptr = j;
            *p1Ptr = i;
            p1Ptr++; p2Ptr++;
        }
    }
    //copy across final genetic data
    int* finalsD;
    cudaAllocResult = cudaMalloc((void**)&finalsD, finalsSize * sizeof(int));
    if(cudaAllocResult != cudaSuccess)
    {
        Rprintf("Error calling cudaMalloc with %zu bytes: %s\n", finalsSize * sizeof(int), cudaGetErrorString(cudaAllocResult));
        delete[] copiedFinals;
        delete[] pair1;
        delete[] pair2;
        cudaFree(nIntercrossingD);
        return false;
    }
    else
    {
        Rprintf("Allocated %zu bytes\n", finalsSize * sizeof(int));
    }
    cudaMemcpy(finalsD, copiedFinals, finalsSize * sizeof(int), cudaMemcpyHostToDevice);
    delete[] copiedFinals;
    //copy across recombination fractions
    double* recombinationFractionsD;
    cudaAllocResult = cudaMalloc((void**)&recombinationFractionsD, args.nRecomb * sizeof(double));
    if(cudaAllocResult != cudaSuccess)
    {
        Rprintf("Error calling cudaMalloc with %zu bytes: %s\n", args.nRecomb * sizeof(double), cudaGetErrorString(cudaAllocResult));
        delete[] pair1;
        delete[] pair2;
        cudaFree(nIntercrossingD);
        cudaFree(finalsD);
        return false;
    }
    else
    {
        Rprintf("Allocated %zu bytes\n", args.nRecomb * sizeof(double));
    }
    cudaMemcpy(recombinationFractionsD, args.recombination, args.nRecomb * sizeof(double), cudaMemcpyHostToDevice);
    //copy across the allowable marker patterns data
    bool* allowableMarkerPatternsD;
    cudaAllocResult = cudaMalloc((void**)&allowableMarkerPatternsD, nMarkerPatterns * nMarkerPatterns * sizeof(bool));
    if(cudaAllocResult != cudaSuccess)
    {
        Rprintf("Error calling cudaMalloc with %zu bytes: %s\n", nMarkerPatterns * nMarkerPatterns*sizeof(bool), cudaGetErrorString(cudaAllocResult));
        delete[] pair1;
        delete[] pair2;
        cudaFree(nIntercrossingD);
        cudaFree(finalsD);
        cudaFree(recombinationFractionsD);
        return false;
    }
    else
    {
        Rprintf("Allocated %zu bytes\n", nMarkerPatterns * nMarkerPatterns * sizeof(bool));
    }
    cudaMemcpy(allowableMarkerPatternsD, args.allowableMarkerPatterns, nMarkerPatterns * nMarkerPatterns * sizeof(bool), cudaMemcpyHostToDevice);
    //copy across marker pattern IDs (this allocation was previously unchecked)
    int* markerPatternIDsD;
    cudaAllocResult = cudaMalloc((void**)&markerPatternIDsD, args.markerPatternIDs.size() * sizeof(int));
    if(cudaAllocResult != cudaSuccess)
    {
        Rprintf("Error calling cudaMalloc with %zu bytes: %s\n", args.markerPatternIDs.size() * sizeof(int), cudaGetErrorString(cudaAllocResult));
        delete[] pair1;
        delete[] pair2;
        cudaFree(nIntercrossingD);
        cudaFree(finalsD);
        cudaFree(recombinationFractionsD);
        cudaFree(allowableMarkerPatternsD);
        return false;
    }
    cudaMemcpy(markerPatternIDsD, &(args.markerPatternIDs[0]), args.markerPatternIDs.size() * sizeof(int), cudaMemcpyHostToDevice);
    //copy across line weights data (this allocation was previously unchecked)
    double* lineWeightsD;
    cudaAllocResult = cudaMalloc((void**)&lineWeightsD, args.lineWeights.size() * sizeof(double));
    if(cudaAllocResult != cudaSuccess)
    {
        Rprintf("Error calling cudaMalloc with %zu bytes: %s\n", args.lineWeights.size() * sizeof(double), cudaGetErrorString(cudaAllocResult));
        delete[] pair1;
        delete[] pair2;
        cudaFree(nIntercrossingD);
        cudaFree(finalsD);
        cudaFree(recombinationFractionsD);
        cudaFree(allowableMarkerPatternsD);
        cudaFree(markerPatternIDsD);
        return false;
    }
    cudaMemcpy(lineWeightsD, &(args.lineWeights[0]), args.lineWeights.size() * sizeof(double), cudaMemcpyHostToDevice);
    int threadsX = args.nRecomb;
    int threadsY = nPairs;
    //guarantee dimBlock.y >= 1: 440/threadsX is zero once nRecomb > 440, which previously gave a zero-sized block and a division by zero below
    dim3 dimBlock(threadsX, std::max(440/threadsX, 1)); // logical max of 512 threads per block (only 440 per multiprocessor on Fermi anyway)
    dim3 dimGrid(1, ceil((double)threadsY / (double)dimBlock.y));
    int donePairs = 0;
    long pairsPerCall = nPairs;
    //grid y dimension is capped at 65535, so process the pairs in batches
    if(dimGrid.y > 65535)
    {
        pairsPerCall = 65535 * dimBlock.y;
        dimGrid.y = pairsPerCall/dimBlock.y;
        threadsY = pairsPerCall;
        Rprintf("Splitting into %ld cuda calls....\n", (long)((nPairs+pairsPerCall-1)/pairsPerCall));
    }
    int requiredThreads = threadsX*threadsY;
    int totalThreads = dimBlock.x*dimGrid.x*dimBlock.y*dimGrid.y;
    Rprintf("Total threads needed = %d\n",requiredThreads);
    Rprintf("Threads in grid = %d\n",totalThreads);
    Rprintf("Surplus threads = %d\n\n", totalThreads - requiredThreads); /* these will need to just sit idle */
    Rprintf("Threads per block %d x %d = %d\n",dimBlock.x,dimBlock.y,dimBlock.x*dimBlock.y);
    Rprintf("Blocks in grid %d x %d = %d\n",dimGrid.x,dimGrid.y,dimGrid.x*dimGrid.y);
    //END_DEBUG
    //batch parameters shared by every call; only the pair window changes per iteration
    rfhaps_gpu_internal_args internal_args;
    internal_args.pair1 = pair1;
    internal_args.pair2 = pair2;
    internal_args.nMarkers = nMarkers;
    internal_args.nFinals = args.nFinals;
    internal_args.recombOffset = 0;
    internal_args.nRecombToCalculate = args.nRecomb;
    internal_args.nRecomb = args.nRecomb;
    internal_args.finalsD = finalsD;
    internal_args.nIntercrossingD = nIntercrossingD;
    internal_args.recombinationFractionsD = recombinationFractionsD;
    internal_args.hasAI = args.hasAI;
    internal_args.nFounders = args.nFounders;
    internal_args.output = args.output;
    internal_args.marker2RangeSize = marker2RangeSize;
    internal_args.marker1Start = args.marker1Start;
    internal_args.marker2Start = args.marker2Start;
    internal_args.markerPatternIDs = markerPatternIDsD;
    internal_args.allowableMarkerPatterns = allowableMarkerPatternsD;
    internal_args.nMarkerPatterns = nMarkerPatterns;
    internal_args.lineWeightsD = lineWeightsD;
    int counter = 0;
    while(donePairs < nPairs)
    {
        Rprintf("Making cuda call %d\n", counter+1);
        internal_args.pairsOffset = donePairs;
        if(donePairs + pairsPerCall >= nPairs)
        {
            internal_args.nPairsToCalculate = nPairs - donePairs;
        }
        else internal_args.nPairsToCalculate = pairsPerCall;
        bool result = rfhaps_gpu_internal(internal_args);
        if(!result)
        {
            Rprintf("A CUDA call failed, exiting...\n");
            //release everything; previously pair1/pair2 and all device buffers leaked on this path
            delete[] pair1;
            delete[] pair2;
            cudaFree(finalsD);
            cudaFree(lineWeightsD);
            cudaFree(nIntercrossingD);
            cudaFree(recombinationFractionsD);
            cudaFree(allowableMarkerPatternsD);
            cudaFree(markerPatternIDsD);
            return false;
        }
        donePairs += internal_args.nPairsToCalculate;
        counter++;
    }
    delete[] pair1;
    delete[] pair2;
    Rprintf("Finished all CUDA calls\n");
    cudaFree(finalsD);
    cudaFree(lineWeightsD);
    cudaFree(nIntercrossingD);
    cudaFree(recombinationFractionsD);
    //these two were previously never freed
    cudaFree(allowableMarkerPatternsD);
    cudaFree(markerPatternIDsD);
    return true;
}
|
f67d3b59a812a46815492ddefc477c6caf4e051a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hist.cuh"
#include "hist_2_one_byte_base.cuh"
#include "tuning_policy_enums.cuh"
#include <hip/hip_cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
/* Shared-memory histogram builder specialization for features needing at most
 * 6 bits (MaxBits() == 6, i.e. up to 64 bins), accumulating two statistics
 * per point. Memory layout and the surrounding driver code live in
 * TPointHist2OneByteBase; this class only supplies the slice addressing,
 * the per-point accumulation and the final reduction. */
template <int BlockSize>
struct TPointHist2OneByte<6, BlockSize> : public TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize> {
using TParent = TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>;
using TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>::Histogram;
__forceinline__ __device__ TPointHist2OneByte(float* buffer)
: TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>(buffer) {
}
//Offset of this thread's shared-memory histogram slice: 1024 floats per warp,
//with two interleaved inner histogram copies selected by bit 4 of threadIdx.x.
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 2;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 4));
return warpOffset + innerHistStart;
}
/* Accumulates N points. ci packs four 8-bit feature bins per ui32; s1/s2 are
 * the two statistics. Shared-memory writes are staggered into two phases
 * (threads with bit 3 of threadIdx.x set write first), separated by 16-lane
 * tile syncs, so two lanes never update the same address simultaneously.
 * A bin value of 64 zeroes the contribution (pass == 0).
 * NOTE(review): exact bin/offset semantics inferred from the arithmetic -
 * confirm against TPointHist2OneByteBase. */
template <int N>
__forceinline__ __device__ void AddPointsImpl(const ui32* ci,
const float* s1,
const float* s2) {
thread_block_tile<16> syncTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
float stat1[N];
float stat2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
//odd lanes swap the two statistics; the swap is undone by the +-1 offset shift below
stat1[k] = flag ? s2[k] : s1[k];
stat2[k] = flag ? s1[k] : s2[k];
}
float val1[N];
float val2[N];
int offset[N];
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
#pragma unroll
for (int k = 0; k < N; ++k) {
const int bin = (ci[k] >> (24 - (f << 2))) & 255;
const float pass = bin != 64 ? 1.0f : 0.0f;
val1[k] = pass * stat1[k];
val2[k] = pass * stat2[k];
offset[k] = f + 16 * (bin & 62) + 8 * (bin & 1) + flag;
}
const bool writeFirstFlag = threadIdx.x & 8;
syncTile.sync();
if (writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val1[k];
}
}
syncTile.sync();
if (!writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val1[k];
}
}
int shift = flag ? -1 : 1;
#pragma unroll
for (int k = 0; k < N; ++k) {
offset[k] += shift;
}
syncTile.sync();
if (writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val2[k];
}
}
syncTile.sync();
if (!writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val2[k];
}
}
}
}
//maximum feature bit width supported by this specialization
static constexpr int MaxBits() {
return 6;
}
/* Collapses the partial histograms (after TParent::ReduceToOneWarp) into the
 * final layout Histogram[64 * (4 * stat + feature) + fold], summing the two
 * inner histogram copies per entry. */
__forceinline__ __device__ void Reduce() {
TParent::ReduceToOneWarp();
if (threadIdx.x < 256) {
const int isSecondStat = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum0 = 0.0f;
float sum1 = 0.0f;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 64;
{
const int innerHistCount = 2;
const volatile float* __restrict__ src = Histogram
+ 2048 //warpHistSize
+ 2 * f
+ 8 * (fold0 & 1)
+ 32 * (fold0 >> 1)
+ isSecondStat;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum0 += src[(inWarpHist << 4)];
sum1 += src[(inWarpHist << 4) + 512];
}
Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold0] = sum0;
Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold0 + 32] = sum1;
}
}
__syncthreads();
}
};
DefineHist2Pass(6)
}
| f67d3b59a812a46815492ddefc477c6caf4e051a.cu | #include "hist.cuh"
#include "hist_2_one_byte_base.cuh"
#include "tuning_policy_enums.cuh"
#include <cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
/* Shared-memory histogram builder specialization for features needing at most
 * 6 bits (MaxBits() == 6, i.e. up to 64 bins), accumulating two statistics
 * per point. Memory layout and the surrounding driver code live in
 * TPointHist2OneByteBase; this class only supplies the slice addressing,
 * the per-point accumulation and the final reduction. */
template <int BlockSize>
struct TPointHist2OneByte<6, BlockSize> : public TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize> {
using TParent = TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>;
using TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>::Histogram;
__forceinline__ __device__ TPointHist2OneByte(float* buffer)
: TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>(buffer) {
}
//Offset of this thread's shared-memory histogram slice: 1024 floats per warp,
//with two interleaved inner histogram copies selected by bit 4 of threadIdx.x.
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 2;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 4));
return warpOffset + innerHistStart;
}
/* Accumulates N points. ci packs four 8-bit feature bins per ui32; s1/s2 are
 * the two statistics. Shared-memory writes are staggered into two phases
 * (threads with bit 3 of threadIdx.x set write first), separated by 16-lane
 * tile syncs, so two lanes never update the same address simultaneously.
 * A bin value of 64 zeroes the contribution (pass == 0).
 * NOTE(review): exact bin/offset semantics inferred from the arithmetic -
 * confirm against TPointHist2OneByteBase. */
template <int N>
__forceinline__ __device__ void AddPointsImpl(const ui32* ci,
const float* s1,
const float* s2) {
thread_block_tile<16> syncTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
float stat1[N];
float stat2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
//odd lanes swap the two statistics; the swap is undone by the +-1 offset shift below
stat1[k] = flag ? s2[k] : s1[k];
stat2[k] = flag ? s1[k] : s2[k];
}
float val1[N];
float val2[N];
int offset[N];
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
#pragma unroll
for (int k = 0; k < N; ++k) {
const int bin = (ci[k] >> (24 - (f << 2))) & 255;
const float pass = bin != 64 ? 1.0f : 0.0f;
val1[k] = pass * stat1[k];
val2[k] = pass * stat2[k];
offset[k] = f + 16 * (bin & 62) + 8 * (bin & 1) + flag;
}
const bool writeFirstFlag = threadIdx.x & 8;
syncTile.sync();
if (writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val1[k];
}
}
syncTile.sync();
if (!writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val1[k];
}
}
int shift = flag ? -1 : 1;
#pragma unroll
for (int k = 0; k < N; ++k) {
offset[k] += shift;
}
syncTile.sync();
if (writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val2[k];
}
}
syncTile.sync();
if (!writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val2[k];
}
}
}
}
//maximum feature bit width supported by this specialization
static constexpr int MaxBits() {
return 6;
}
/* Collapses the partial histograms (after TParent::ReduceToOneWarp) into the
 * final layout Histogram[64 * (4 * stat + feature) + fold], summing the two
 * inner histogram copies per entry. */
__forceinline__ __device__ void Reduce() {
TParent::ReduceToOneWarp();
if (threadIdx.x < 256) {
const int isSecondStat = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum0 = 0.0f;
float sum1 = 0.0f;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 64;
{
const int innerHistCount = 2;
const volatile float* __restrict__ src = Histogram
+ 2048 //warpHistSize
+ 2 * f
+ 8 * (fold0 & 1)
+ 32 * (fold0 >> 1)
+ isSecondStat;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum0 += src[(inWarpHist << 4)];
sum1 += src[(inWarpHist << 4) + 512];
}
Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold0] = sum0;
Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold0 + 32] = sum1;
}
}
__syncthreads();
}
};
DefineHist2Pass(6)
}
|
1b10feacdfa9c2499691c9a26c3b408159f53683.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
// Converts an RGBA image to greyscale using the NTSC luma weights.
// Launch layout (set by your_rgba_to_greyscale): one block per row,
// one thread per pixel in that row.
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
    const int row = blockIdx.x;
    const int col = threadIdx.x;
    // Guard so the kernel is safe for any launch configuration, not just
    // the exact-fit one.
    if (row >= numRows || col >= numCols)
        return;
    const int idx = row * numCols + col;
    // Fix: the original stored into the unsigned char output after each
    // weighted term, truncating the fractional part three separate times
    // (sum of truncations != truncation of the sum). Accumulate in float
    // and convert once.
    const uchar4 px = rgbaImage[idx];
    greyImage[idx] = static_cast<unsigned char>(
        0.299f * px.x + 0.587f * px.y + 0.114f * px.z);
}
// Host wrapper: launches rgba_to_greyscale with one block per image row and
// one thread per column.
// NOTE(review): blockSize.x == numCols — the launch fails for images wider
// than the device's max threads per block (typically 1024); confirm inputs.
// NOTE(review): numRows/numCols are size_t here but the kernel takes int —
// narrows silently for very large images.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const dim3 blockSize(numCols, 1, 1);
const dim3 gridSize(numRows, 1, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 1b10feacdfa9c2499691c9a26c3b408159f53683.cu | #include "utils.h"
// Converts an RGBA image to greyscale using the NTSC luma weights.
// Launch layout (set by your_rgba_to_greyscale): one block per row,
// one thread per pixel in that row.
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
    const int row = blockIdx.x;
    const int col = threadIdx.x;
    // Guard so the kernel is safe for any launch configuration, not just
    // the exact-fit one.
    if (row >= numRows || col >= numCols)
        return;
    const int idx = row * numCols + col;
    // Fix: the original stored into the unsigned char output after each
    // weighted term, truncating the fractional part three separate times
    // (sum of truncations != truncation of the sum). Accumulate in float
    // and convert once.
    const uchar4 px = rgbaImage[idx];
    greyImage[idx] = static_cast<unsigned char>(
        0.299f * px.x + 0.587f * px.y + 0.114f * px.z);
}
// Host wrapper: launches rgba_to_greyscale with one block per image row and
// one thread per column.
// NOTE(review): blockSize.x == numCols — the launch fails for images wider
// than the device's max threads per block (typically 1024); confirm inputs.
// NOTE(review): numRows/numCols are size_t here but the kernel takes int —
// narrows silently for very large images.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const dim3 blockSize(numCols, 1, 1);
const dim3 gridSize(numRows, 1, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
f445e8d09ebfa623d551a2c31a91e12d7243cb4f.hip | // !!! This is a file automatically generated by hipify!!!
#include<cstdio>
// Lists every visible GPU by index and name.
int main(){
    int deviceNum = 0;
    // Fix: check the API result — on failure deviceNum was previously read
    // uninitialized and the report/loop below used an indeterminate value.
    if (hipGetDeviceCount(&deviceNum) != hipSuccess) {
        printf("failed to query device count\n");
        return 1;
    }
    printf("total %d cards\n", deviceNum);
    for(int i = 0; i < deviceNum; i++){
        struct hipDeviceProp_t prop;
        // Skip (but report) devices whose properties cannot be queried
        // instead of printing garbage from an uninitialized struct.
        if (hipGetDeviceProperties(&prop, i) != hipSuccess) {
            printf("card %d: <unavailable>\n", i);
            continue;
        }
        printf("card %d: %s\n", i, prop.name);
    }
    return 0;
}
| f445e8d09ebfa623d551a2c31a91e12d7243cb4f.cu | #include<cstdio>
// Lists every visible GPU by index and name.
int main(){
    int deviceNum = 0;
    // Fix: check the API result — on failure deviceNum was previously read
    // uninitialized and the report/loop below used an indeterminate value.
    if (cudaGetDeviceCount(&deviceNum) != cudaSuccess) {
        printf("failed to query device count\n");
        return 1;
    }
    printf("total %d cards\n", deviceNum);
    for(int i = 0; i < deviceNum; i++){
        struct cudaDeviceProp prop;
        // Skip (but report) devices whose properties cannot be queried
        // instead of printing garbage from an uninitialized struct.
        if (cudaGetDeviceProperties(&prop, i) != cudaSuccess) {
            printf("card %d: <unavailable>\n", i);
            continue;
        }
        printf("card %d: %s\n", i, prop.name);
    }
    return 0;
}
|
2a742b7199966ff6736ed07fa1487ccbb1d19761.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cfloat>
#include <iostream>
using std::cout;
using std::endl;
#define BLOCKSIZE 512
namespace lsr_space {
// Tree reduction over sdata[0..blocksize): after the call sdata[0] holds the
// sum of all entries. blocksize must be a power of two (halving loop), and
// every thread of the block must reach this call (it uses __syncthreads).
template<typename scalar_t>
__forceinline__ __device__ void reduce_sum(scalar_t *sdata, int blocksize, int tid) {
__syncthreads();
// NOTE: block size should be 2 ** x
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
// // reduce between warps
// if (blocksize >= 1024) {
// if (tid < 512) sdata[tid] += sdata[tid + 512];
// __syncthreads();
// }
// if (blocksize >= 512) {
// if (tid < 256) sdata[tid] += sdata[tid + 256];
// __syncthreads();
// }
// if (blocksize >= 256) {
// if (tid < 128) sdata[tid] += sdata[tid + 128];
// __syncthreads();
// }
// if (blocksize >= 128) {
// if (tid < 64) sdata[tid] += sdata[tid + 64];
// __syncthreads();
// }
// // reduce within warps
// if (tid < 32) {
// if (blocksize >= 64) sdata[tid] += sdata[tid + 32];
// if (blocksize >= 32) sdata[tid] += sdata[tid + 16];
// if (blocksize >= 16) sdata[tid] += sdata[tid + 8];
// if (blocksize >= 8) sdata[tid] += sdata[tid + 4];
// if (blocksize >= 4) sdata[tid] += sdata[tid + 2];
// if (blocksize >= 2) sdata[tid] += sdata[tid + 1];
// }
}
}
// kernel function for forward and backward
// Label-smoothed NLL forward. Layout: blockDim.x threads cooperatively reduce
// over the class dimension (dim 1) for one sample; blockDim.y samples per
// block; grid-stride over the n_size * m_size samples. blockDim.x must be a
// power of two (see lsr_space::reduce_sum).
// NOTE(review): the `continue` on ignore_index and the loop tail skip the
// __syncthreads below for some rows while others execute it — barrier counts
// can diverge across blockDim.y rows; confirm launch shapes make this safe.
template<typename scalar_t>
__global__ void LSRLossForward(const int n_size,
const int dimsize, const int m_size,
const scalar_t *log_scores,
const int64_t *labels,
scalar_t *losses,
const int64_t ignore_index, const float smooth) {
// shared memory
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
int shm_offset = blockDim.x;
int sample_offset = gridDim.x * blockDim.y;
// Each row of threads (fixed threadIdx.y) gets its own shared-mem slice.
sdata = sdata + shm_offset * threadIdx.y;
scalar_t zero(0.f);
int tid = threadIdx.x;
int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
// Smoothed target weights: 1-smooth on the label, smooth/dimsize elsewhere.
const scalar_t lb_pos(1.f - smooth);
const scalar_t lb_neg = smooth / dimsize;
int samplesize = n_size * m_size;
for (int i{sample_id}; i < samplesize; i += sample_offset) {
int n_idx = i / m_size;
int m_idx = i % m_size;
int64_t lb = labels[i];
if (lb == ignore_index) {
if (tid == 0) losses[i] = zero;
continue;
}
sdata[tid] = zero;
__syncthreads();
// Strided partial sum over the class dimension.
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
if (j == lb) {
sdata[tid] += -log_scores[idx] * lb_pos;
} else {
sdata[tid] += -log_scores[idx] * lb_neg;
}
}
lsr_space::reduce_sum<scalar_t>(sdata, blockDim.x, tid);
if (tid == 0) losses[i] = sdata[0];
__syncthreads();
}
}
// Turns softmax probabilities (pre-loaded into grad_logits) into the gradient
// of the label-smoothed cross-entropy w.r.t. the logits, in place.
// One thread per element, grid-stride over n_size * dimsize * m_size.
// Elements of samples labelled ignore_index get a zero gradient.
template<typename scalar_t>
__global__ void LSRLossBackward(const int n_size,
const int dimsize, const int m_size,
scalar_t *grad_logits,
const int64_t *labels,
const int64_t ignore_index,
const float smooth) {
    const scalar_t lb_pos(1.f - smooth);
    const scalar_t lb_neg = smooth / dimsize;
    // Total mass of the smoothed target distribution.
    const scalar_t sumy = lb_neg * (dimsize - 1) + lb_pos;
    const int plane = dimsize * m_size;   // elements per sample along dim 0
    const int total = n_size * plane;
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < total; idx += step) {
        const int n_idx = idx / plane;
        const int rem = idx % plane;
        const int dim_idx = rem / m_size;
        const int m_idx = rem % m_size;
        const int64_t lb = labels[n_idx * m_size + m_idx];
        scalar_t g(0);
        if (lb != ignore_index) {
            const scalar_t target = (lb == dim_idx) ? lb_pos : lb_neg;
            g = sumy * grad_logits[idx] - target;
        }
        grad_logits[idx] = g;
    }
}
// Per-sample label-smoothed NLL when the class dimension is small: one thread
// handles one sample entirely, grid-striding over n_size * m_size samples.
template<typename scalar_t>
__global__ void SpatialLSRLossForward(const int n_size,
const int dimsize, const int m_size,
const scalar_t *log_scores,
const int64_t *labels,
scalar_t *losses,
const int64_t ignore_index, const float smooth) {
    // Bug fix: the original stashed tid / samplesize / sample_offset in a
    // __shared__ int array written concurrently by EVERY thread of the block.
    // sdata[0] (the tid) differs per thread, so those unsynchronized writes
    // race and all threads iterate from whichever value happened to win —
    // most samples were skipped or processed multiple times. These are
    // per-thread scalars and belong in registers.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int samplesize = n_size * m_size;
    const int sample_offset = gridDim.x * blockDim.x;
    // Smoothed target weights: 1-smooth on the label, smooth/dimsize elsewhere.
    const scalar_t lb_pos(1.f - smooth);
    const scalar_t lb_neg = smooth / dimsize;
    for (int i = tid; i < samplesize; i += sample_offset) {
        int lb = static_cast<int>(labels[i]);
        if (lb == ignore_index) {
            losses[i] = scalar_t(0.);
            continue;
        }
        int n_idx = i / m_size;
        int m_idx = i % m_size;
        scalar_t loss_val(0);
        // Serial weighted sum over the class dimension.
        for (int j = 0; j < dimsize; ++j) {
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            loss_val -= (j == lb ? lb_pos : lb_neg) * log_scores[idx];
        }
        losses[i] = loss_val;
    }
}
// cuda forward and backward
// Forward dispatcher: computes log_softmax over dim 1 on the device, then
// launches one of two kernels depending on problem shape. Returns a tensor
// of per-sample losses shaped like `labels`.
at::Tensor LSR_forward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float smooth) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
// m_size = product of trailing spatial dims (1 for plain 2-D logits).
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
// allocate memory and cuda grid/block
auto losses = torch::zeros_like(labels, logits.options());
auto log_scores = torch::log_softmax(logits, 1);
if (losses.numel() == 0) {
THCudaCheck(hipGetLastError());
return losses;
}
// Few classes, many samples: one thread per sample (no shared reduction).
// NOTE(review): shm_size is passed but SpatialLSRLossForward declares its
// shared memory statically — the dynamic allocation is unused.
if (dimsize < 32 && samplesize > 4096) {
int blockx = 32;
while (blockx < samplesize && blockx < BLOCKSIZE) blockx *= 2;
int gridx = ::max(::min(4096, samplesize / BLOCKSIZE), 1);
dim3 block(blockx);
dim3 grid(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "lsr forward", [&] {
int shm_size = BLOCKSIZE * sizeof(scalar_t);
hipLaunchKernelGGL(( SpatialLSRLossForward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
log_scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
losses.contiguous().data_ptr<scalar_t>(),
ignore_index, smooth
);
});
} else {
// General case: blockx threads reduce over the class dimension per
// sample, blocky samples per block; blockx stays a power of two.
int blockx = 32;
while (blockx < dimsize) blockx *= 2;
blockx = ::max(::min((int)BLOCKSIZE, blockx / 2), (int)32);
int blocky = ::min(samplesize, (int)(BLOCKSIZE / blockx));
int gridx = ::max(1, ::min(4096, (int)(samplesize / blocky)));
int n_shm = blockx * blocky;
dim3 block(blockx, blocky);
dim3 grid(gridx);
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "lsr forward", [&] {
int shm_size = n_shm * sizeof(scalar_t);
hipLaunchKernelGGL(( LSRLossForward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
log_scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
losses.contiguous().data_ptr<scalar_t>(),
ignore_index, smooth
);
});
}
THCudaCheck(hipGetLastError());
return losses;
}
// Backward dispatcher: computes softmax(logits) and lets LSRLossBackward
// overwrite it in place with d(loss)/d(logits).
// NOTE(review): no upstream grad_output tensor is consumed here — presumably
// the Python autograd wrapper applies it; confirm against the caller.
at::Tensor LSR_backward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float smooth) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int log_size = logits.numel();
// allocate memory and cuda grid/block
auto grad_logits = torch::softmax(logits, 1);
if (grad_logits.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_logits;
}
// One thread per logit element; the kernel grid-strides over the rest.
int blockx = 32;
while (blockx < log_size && blockx < BLOCKSIZE) blockx *= 2;
dim3 block(blockx);
int gridx = ::max(::min(log_size / BLOCKSIZE, (int)4096), 1);
dim3 grid(gridx);
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lsr backwrd", [&] {
hipLaunchKernelGGL(( LSRLossBackward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
grad_logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
ignore_index, smooth
);
});
THCudaCheck(hipGetLastError());
return grad_logits;
}
// python inferface
// Python-facing forward entry point: validates device placement, pins the
// current device, and delegates to the CUDA implementation.
at::Tensor LSR_forward(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float smooth) {
    const bool logits_on_gpu = logits.device().type() == c10::kCUDA;
    const bool labels_on_gpu = labels.device().type() == c10::kCUDA;
    if (!(logits_on_gpu && labels_on_gpu)) {
        AT_ERROR("this LSR loss only supports gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return LSR_forward_cuda(logits, labels, ignore_index, smooth);
}
// Python-facing backward entry point: validates device placement, pins the
// current device, and delegates to the CUDA implementation.
at::Tensor LSR_backward(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float smooth) {
    const bool logits_on_gpu = logits.device().type() == c10::kCUDA;
    const bool labels_on_gpu = labels.device().type() == c10::kCUDA;
    if (!(logits_on_gpu && labels_on_gpu)) {
        AT_ERROR("this LSR loss only supports gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return LSR_backward_cuda(logits, labels, ignore_index, smooth);
}
// Python bindings: exposes the forward/backward entry points on the module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("lsr_forward", &LSR_forward, "lsr forward");
m.def("lsr_backward", &LSR_backward, "lsr backward");
}
| 2a742b7199966ff6736ed07fa1487ccbb1d19761.cu |
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#include <iostream>
using std::cout;
using std::endl;
#define BLOCKSIZE 512
namespace lsr_space {
// Tree reduction over sdata[0..blocksize): after the call sdata[0] holds the
// sum of all entries. blocksize must be a power of two (halving loop), and
// every thread of the block must reach this call (it uses __syncthreads).
template<typename scalar_t>
__forceinline__ __device__ void reduce_sum(scalar_t *sdata, int blocksize, int tid) {
__syncthreads();
// NOTE: block size should be 2 ** x
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
// // reduce between warps
// if (blocksize >= 1024) {
// if (tid < 512) sdata[tid] += sdata[tid + 512];
// __syncthreads();
// }
// if (blocksize >= 512) {
// if (tid < 256) sdata[tid] += sdata[tid + 256];
// __syncthreads();
// }
// if (blocksize >= 256) {
// if (tid < 128) sdata[tid] += sdata[tid + 128];
// __syncthreads();
// }
// if (blocksize >= 128) {
// if (tid < 64) sdata[tid] += sdata[tid + 64];
// __syncthreads();
// }
// // reduce within warps
// if (tid < 32) {
// if (blocksize >= 64) sdata[tid] += sdata[tid + 32];
// if (blocksize >= 32) sdata[tid] += sdata[tid + 16];
// if (blocksize >= 16) sdata[tid] += sdata[tid + 8];
// if (blocksize >= 8) sdata[tid] += sdata[tid + 4];
// if (blocksize >= 4) sdata[tid] += sdata[tid + 2];
// if (blocksize >= 2) sdata[tid] += sdata[tid + 1];
// }
}
}
// kernel function for forward and backward
// Label-smoothed NLL forward. Layout: blockDim.x threads cooperatively reduce
// over the class dimension (dim 1) for one sample; blockDim.y samples per
// block; grid-stride over the n_size * m_size samples. blockDim.x must be a
// power of two (see lsr_space::reduce_sum).
// NOTE(review): the `continue` on ignore_index and the loop tail skip the
// __syncthreads below for some rows while others execute it — barrier counts
// can diverge across blockDim.y rows; confirm launch shapes make this safe.
template<typename scalar_t>
__global__ void LSRLossForward(const int n_size,
const int dimsize, const int m_size,
const scalar_t *log_scores,
const int64_t *labels,
scalar_t *losses,
const int64_t ignore_index, const float smooth) {
// shared memory
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
int shm_offset = blockDim.x;
int sample_offset = gridDim.x * blockDim.y;
// Each row of threads (fixed threadIdx.y) gets its own shared-mem slice.
sdata = sdata + shm_offset * threadIdx.y;
scalar_t zero(0.f);
int tid = threadIdx.x;
int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
// Smoothed target weights: 1-smooth on the label, smooth/dimsize elsewhere.
const scalar_t lb_pos(1.f - smooth);
const scalar_t lb_neg = smooth / dimsize;
int samplesize = n_size * m_size;
for (int i{sample_id}; i < samplesize; i += sample_offset) {
int n_idx = i / m_size;
int m_idx = i % m_size;
int64_t lb = labels[i];
if (lb == ignore_index) {
if (tid == 0) losses[i] = zero;
continue;
}
sdata[tid] = zero;
__syncthreads();
// Strided partial sum over the class dimension.
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
if (j == lb) {
sdata[tid] += -log_scores[idx] * lb_pos;
} else {
sdata[tid] += -log_scores[idx] * lb_neg;
}
}
lsr_space::reduce_sum<scalar_t>(sdata, blockDim.x, tid);
if (tid == 0) losses[i] = sdata[0];
__syncthreads();
}
}
// Turns softmax probabilities (pre-loaded into grad_logits) into the gradient
// of the label-smoothed cross-entropy w.r.t. the logits, in place.
// One thread per element, grid-stride over n_size * dimsize * m_size.
// Elements of samples labelled ignore_index get a zero gradient.
template<typename scalar_t>
__global__ void LSRLossBackward(const int n_size,
const int dimsize, const int m_size,
scalar_t *grad_logits,
const int64_t *labels,
const int64_t ignore_index,
const float smooth) {
    const scalar_t lb_pos(1.f - smooth);
    const scalar_t lb_neg = smooth / dimsize;
    // Total mass of the smoothed target distribution.
    const scalar_t sumy = lb_neg * (dimsize - 1) + lb_pos;
    const int plane = dimsize * m_size;   // elements per sample along dim 0
    const int total = n_size * plane;
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < total; idx += step) {
        const int n_idx = idx / plane;
        const int rem = idx % plane;
        const int dim_idx = rem / m_size;
        const int m_idx = rem % m_size;
        const int64_t lb = labels[n_idx * m_size + m_idx];
        scalar_t g(0);
        if (lb != ignore_index) {
            const scalar_t target = (lb == dim_idx) ? lb_pos : lb_neg;
            g = sumy * grad_logits[idx] - target;
        }
        grad_logits[idx] = g;
    }
}
// Per-sample label-smoothed NLL when the class dimension is small: one thread
// handles one sample entirely, grid-striding over n_size * m_size samples.
template<typename scalar_t>
__global__ void SpatialLSRLossForward(const int n_size,
const int dimsize, const int m_size,
const scalar_t *log_scores,
const int64_t *labels,
scalar_t *losses,
const int64_t ignore_index, const float smooth) {
    // Bug fix: the original stashed tid / samplesize / sample_offset in a
    // __shared__ int array written concurrently by EVERY thread of the block.
    // sdata[0] (the tid) differs per thread, so those unsynchronized writes
    // race and all threads iterate from whichever value happened to win —
    // most samples were skipped or processed multiple times. These are
    // per-thread scalars and belong in registers.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int samplesize = n_size * m_size;
    const int sample_offset = gridDim.x * blockDim.x;
    // Smoothed target weights: 1-smooth on the label, smooth/dimsize elsewhere.
    const scalar_t lb_pos(1.f - smooth);
    const scalar_t lb_neg = smooth / dimsize;
    for (int i = tid; i < samplesize; i += sample_offset) {
        int lb = static_cast<int>(labels[i]);
        if (lb == ignore_index) {
            losses[i] = scalar_t(0.);
            continue;
        }
        int n_idx = i / m_size;
        int m_idx = i % m_size;
        scalar_t loss_val(0);
        // Serial weighted sum over the class dimension.
        for (int j = 0; j < dimsize; ++j) {
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            loss_val -= (j == lb ? lb_pos : lb_neg) * log_scores[idx];
        }
        losses[i] = loss_val;
    }
}
// cuda forward and backward
// Forward dispatcher: computes log_softmax over dim 1 on the device, then
// launches one of two kernels depending on problem shape. Returns a tensor
// of per-sample losses shaped like `labels`.
at::Tensor LSR_forward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float smooth) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
// m_size = product of trailing spatial dims (1 for plain 2-D logits).
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
// allocate memory and cuda grid/block
auto losses = torch::zeros_like(labels, logits.options());
auto log_scores = torch::log_softmax(logits, 1);
if (losses.numel() == 0) {
THCudaCheck(cudaGetLastError());
return losses;
}
// Few classes, many samples: one thread per sample (no shared reduction).
// NOTE(review): shm_size is passed but SpatialLSRLossForward declares its
// shared memory statically — the dynamic allocation is unused.
if (dimsize < 32 && samplesize > 4096) {
int blockx = 32;
while (blockx < samplesize && blockx < BLOCKSIZE) blockx *= 2;
int gridx = std::max(std::min(4096, samplesize / BLOCKSIZE), 1);
dim3 block(blockx);
dim3 grid(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "lsr forward", [&] {
int shm_size = BLOCKSIZE * sizeof(scalar_t);
SpatialLSRLossForward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
log_scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
losses.contiguous().data_ptr<scalar_t>(),
ignore_index, smooth
);
});
} else {
// General case: blockx threads reduce over the class dimension per
// sample, blocky samples per block; blockx stays a power of two.
int blockx = 32;
while (blockx < dimsize) blockx *= 2;
blockx = std::max(std::min((int)BLOCKSIZE, blockx / 2), (int)32);
int blocky = std::min(samplesize, (int)(BLOCKSIZE / blockx));
int gridx = std::max(1, std::min(4096, (int)(samplesize / blocky)));
int n_shm = blockx * blocky;
dim3 block(blockx, blocky);
dim3 grid(gridx);
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "lsr forward", [&] {
int shm_size = n_shm * sizeof(scalar_t);
LSRLossForward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
log_scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
losses.contiguous().data_ptr<scalar_t>(),
ignore_index, smooth
);
});
}
THCudaCheck(cudaGetLastError());
return losses;
}
// Backward dispatcher: computes softmax(logits) and lets LSRLossBackward
// overwrite it in place with d(loss)/d(logits).
// NOTE(review): no upstream grad_output tensor is consumed here — presumably
// the Python autograd wrapper applies it; confirm against the caller.
at::Tensor LSR_backward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float smooth) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int log_size = logits.numel();
// allocate memory and cuda grid/block
auto grad_logits = torch::softmax(logits, 1);
if (grad_logits.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_logits;
}
// One thread per logit element; the kernel grid-strides over the rest.
int blockx = 32;
while (blockx < log_size && blockx < BLOCKSIZE) blockx *= 2;
dim3 block(blockx);
int gridx = std::max(std::min(log_size / BLOCKSIZE, (int)4096), 1);
dim3 grid(gridx);
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lsr backwrd", [&] {
LSRLossBackward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
grad_logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
ignore_index, smooth
);
});
THCudaCheck(cudaGetLastError());
return grad_logits;
}
// python inferface
// Python-facing forward entry point: validates device placement, pins the
// current device, and delegates to the CUDA implementation.
at::Tensor LSR_forward(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float smooth) {
    const bool logits_on_gpu = logits.device().type() == c10::kCUDA;
    const bool labels_on_gpu = labels.device().type() == c10::kCUDA;
    if (!(logits_on_gpu && labels_on_gpu)) {
        AT_ERROR("this LSR loss only supports gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return LSR_forward_cuda(logits, labels, ignore_index, smooth);
}
// Python-facing backward entry point: validates device placement, pins the
// current device, and delegates to the CUDA implementation.
at::Tensor LSR_backward(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float smooth) {
    const bool logits_on_gpu = logits.device().type() == c10::kCUDA;
    const bool labels_on_gpu = labels.device().type() == c10::kCUDA;
    if (!(logits_on_gpu && labels_on_gpu)) {
        AT_ERROR("this LSR loss only supports gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return LSR_backward_cuda(logits, labels, ignore_index, smooth);
}
// Python bindings: exposes the forward/backward entry points on the module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("lsr_forward", &LSR_forward, "lsr forward");
m.def("lsr_backward", &LSR_backward, "lsr backward");
}
|
34c908945a6c1ac5bd804519567871e7cc41b61d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#define M 6
#define N 5
#define IDX2F(i,j,ld) (((j-1)*ld)+(i-1))
// Scale, in place on the device, the tail of row p (columns q..n) by alpha
// and the tail of column q (rows p..ldm) by beta. m is column-major with
// leading dimension ldm; p and q are 1-based per IDX2F.
static __inline__ void modify (hipblasHandle_t handle, float *m, int ldm, int n, int p,
int q, float alpha, float beta){
    // Fix: the row scaling used a count of n - p + 1; with stride ldm and
    // start (p,q) that walks past column n whenever q > p — e.g. the call
    // site (p=2, q=3, n=5, ldm=6) touched element index 31 of a 30-element
    // buffer. Columns q..n contain n - q + 1 elements, matching the official
    // cuBLAS documentation example.
    hipblasSscal (handle, n - q + 1, &alpha, &m[IDX2F(p,q,ldm)], ldm);
    hipblasSscal (handle, ldm - p + 1, &beta, &m[IDX2F(p,q,ldm)], 1);
}
// cuBLAS/hipBLAS smoke test: build a column-major M x N matrix on the host,
// upload it, scale a row/column slice on the device, download, and print.
int main (void){
    hipError_t cudaStat;
    hipblasStatus_t stat;
    hipblasHandle_t handle;
    int i, j;
    float* devPtrA;
    float* a = 0;
    a = (float *)malloc (M * N * sizeof (*a));
    if (!a) {
        printf ("host memory allocation failed");
        return EXIT_FAILURE;
    }
    for (j = 1; j <= N; j++) {
        for (i = 1; i <= M; i++) {
            a[IDX2F(i,j,M)] = (float)((i-1) * M + j);
        }
    }
    cudaStat = hipMalloc ((void**)&devPtrA, M*N*sizeof(*a));
    if ( cudaStat != hipSuccess ) {
        printf ("device memory allocation failed");
        free (a);               // fix: host buffer was leaked on every path
        return EXIT_FAILURE;
    }
    stat = hipblasCreate(&handle);
    if ( stat != HIPBLAS_STATUS_SUCCESS ) {
        printf ("CUBLAS initialization failed\n");
        hipFree (devPtrA);      // fix: device buffer also leaked here
        free (a);
        return EXIT_FAILURE;
    }
    stat = hipblasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
    if (stat != HIPBLAS_STATUS_SUCCESS) {
        printf ("data download failed");
        hipFree (devPtrA);
        hipblasDestroy (handle);
        free (a);
        return EXIT_FAILURE;
    }
    // Scale part of row 2 by 16 and part of column 3 by 12 on the device.
    modify (handle, devPtrA, M, N, 2, 3, 16.0f, 12.0f);
    stat = hipblasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
    if ( stat != HIPBLAS_STATUS_SUCCESS ) {
        printf ("data upload failed");
        hipFree (devPtrA);
        hipblasDestroy (handle);
        free (a);
        return EXIT_FAILURE;
    }
    hipFree (devPtrA);
    hipblasDestroy (handle);
    for (j = 1; j <= N; j++) {
        for (i = 1; i <= M; i++) {
            printf ("%7.0f", a[IDX2F(i,j,M)]);
        }
        printf ("\n");
    }
    free (a);
    return EXIT_SUCCESS;
}
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#define M 6
#define N 5
#define IDX2F(i,j,ld) (((j-1)*ld)+(i-1))
// Scale, in place on the device, the tail of row p (columns q..n) by alpha
// and the tail of column q (rows p..ldm) by beta. m is column-major with
// leading dimension ldm; p and q are 1-based per IDX2F.
static __inline__ void modify (cublasHandle_t handle, float *m, int ldm, int n, int p,
int q, float alpha, float beta){
    // Fix: the row scaling used a count of n - p + 1; with stride ldm and
    // start (p,q) that walks past column n whenever q > p — e.g. the call
    // site (p=2, q=3, n=5, ldm=6) touched element index 31 of a 30-element
    // buffer. Columns q..n contain n - q + 1 elements, matching the official
    // cuBLAS documentation example.
    cublasSscal (handle, n - q + 1, &alpha, &m[IDX2F(p,q,ldm)], ldm);
    cublasSscal (handle, ldm - p + 1, &beta, &m[IDX2F(p,q,ldm)], 1);
}
// cuBLAS smoke test: build a column-major M x N matrix on the host, upload
// it, scale a row/column slice on the device, download, and print.
int main (void){
    cudaError_t cudaStat;
    cublasStatus_t stat;
    cublasHandle_t handle;
    int i, j;
    float* devPtrA;
    float* a = 0;
    a = (float *)malloc (M * N * sizeof (*a));
    if (!a) {
        printf ("host memory allocation failed");
        return EXIT_FAILURE;
    }
    for (j = 1; j <= N; j++) {
        for (i = 1; i <= M; i++) {
            a[IDX2F(i,j,M)] = (float)((i-1) * M + j);
        }
    }
    cudaStat = cudaMalloc ((void**)&devPtrA, M*N*sizeof(*a));
    if ( cudaStat != cudaSuccess ) {
        printf ("device memory allocation failed");
        free (a);               // fix: host buffer was leaked on every path
        return EXIT_FAILURE;
    }
    stat = cublasCreate(&handle);
    if ( stat != CUBLAS_STATUS_SUCCESS ) {
        printf ("CUBLAS initialization failed\n");
        cudaFree (devPtrA);     // fix: device buffer also leaked here
        free (a);
        return EXIT_FAILURE;
    }
    stat = cublasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
    if (stat != CUBLAS_STATUS_SUCCESS) {
        printf ("data download failed");
        cudaFree (devPtrA);
        cublasDestroy (handle);
        free (a);
        return EXIT_FAILURE;
    }
    // Scale part of row 2 by 16 and part of column 3 by 12 on the device.
    modify (handle, devPtrA, M, N, 2, 3, 16.0f, 12.0f);
    stat = cublasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
    if ( stat != CUBLAS_STATUS_SUCCESS ) {
        printf ("data upload failed");
        cudaFree (devPtrA);
        cublasDestroy (handle);
        free (a);
        return EXIT_FAILURE;
    }
    cudaFree (devPtrA);
    cublasDestroy (handle);
    for (j = 1; j <= N; j++) {
        for (i = 1; i <= M; i++) {
            printf ("%7.0f", a[IDX2F(i,j,M)]);
        }
        printf ("\n");
    }
    free (a);
    return EXIT_SUCCESS;
}
8d4af100a9142aa4db73c72ea2099f016b37a642.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Drives the matrix-multiply test: builds M, N, P either with random sizes
// (default) or from files (argc == 4/5), runs the GPU kernel, checks against
// a CPU reference (computeGold), and optionally writes P to disk.
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
// Fixed seed so the random-size path is reproducible run to run.
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
M = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
N = AllocateMatrix(M.width, rand() % 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
// NOTE(review): params is allocated by cutReadFilei and never freed.
int* params = NULL; //(int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
cutReadFilei(argv[1], &params, &data_read, true);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
printf("CPU computation complete\n");
// check if the result is equivalent to the expected solution
// (element-wise comparison with epsilon 0.001)
CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
// NOTE(review): 'reference' is never freed; harmless at exit but a leak.
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Computes P = M * N on the device: uploads the inputs, launches a
//! TILE_WIDTH x TILE_WIDTH tiled kernel over a grid covering P (the ceil
//! division below handles sizes that are not multiples of TILE_WIDTH),
//! and downloads the result into P.
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
dim3 block_size(TILE_WIDTH, TILE_WIDTH);
int grid_rows = P.height / TILE_WIDTH + (P.height % TILE_WIDTH ? 1 : 0);
int grid_cols = P.width / TILE_WIDTH + (P.width % TILE_WIDTH ? 1 : 0);
dim3 grid_size(grid_cols, grid_rows);
// Launch the device computation threads!
hipLaunchKernelGGL(( MatrixMulKernel), dim3(grid_size), dim3(block_size) , 0, 0, Md, Nd, Pd);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M (dimensions copied, elements
// pointer replaced with fresh device storage).
// NOTE(review): the hipMalloc result is not checked — on failure the
// returned struct carries an invalid pointer and later copies fail silently.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a host matrix of dimensions height*width.
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization (values in [0, 3]).
// If init == 2, initialize matrix parameters, but do not allocate memory.
Matrix AllocateMatrix(int height, int width, int init)
{
    Matrix M;
    M.width = M.pitch = width;
    M.height = height;
    int size = M.width * M.height;
    M.elements = NULL;
    // don't allocate memory on option 2
    if(init == 2)
        return M;
    M.elements = (float*) malloc(size*sizeof(float));
    for(unsigned int i = 0; i < M.height * M.width; i++)
    {
        // Fix: the original computed rand()*3 in int arithmetic, which
        // overflows (undefined behavior) on platforms where
        // RAND_MAX == INT_MAX; promote to float before scaling.
        M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
    }
    return M;
}
// Copy a host matrix to a device matrix.
// NOTE(review): Mdevice is passed by value, so the height/width/pitch
// assignments below only touch the local copy and have no effect on the
// caller's struct — only the memcpy does real work here.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Download a device matrix into a host matrix of the same dimensions.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
    const int bytes = Mdevice.width * Mdevice.height * sizeof(float);
    hipMemcpy(Mhost.elements, Mdevice.elements, bytes, hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read is
// equals M.height * M.width, and 1 otherwise
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
| 8d4af100a9142aa4db73c72ea2099f016b37a642.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
M = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
N = AllocateMatrix(M.width, rand() % 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL; //(int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
cutReadFilei(argv[1], ¶ms, &data_read, true);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
printf("CPU computation complete\n");
// in this case check if the result is equivalent to the expected soluion
CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
dim3 block_size(TILE_WIDTH, TILE_WIDTH);
int grid_rows = P.height / TILE_WIDTH + (P.height % TILE_WIDTH ? 1 : 0);
int grid_cols = P.width / TILE_WIDTH + (P.width % TILE_WIDTH ? 1 : 0);
dim3 grid_size(grid_cols, grid_rows);
// Launch the device computation threads!
MatrixMulKernel<<< grid_size, block_size >>> (Md, Nd, Pd);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read is
// equals M.height * M.width, and 1 otherwise
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
|
60746bc1e2343dc00eb7888d219fc65e4bcf6c9f.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <dmlc/logging.h>
#include <dmlc/filesystem.h>
#include <xgboost/c_api.h>
#include <xgboost/predictor.h>
#include <string>
#include "gtest/gtest.h"
#include "../helpers.h"
namespace {
inline void CheckCAPICall(int ret) {
ASSERT_EQ(ret, 0) << XGBGetLastError();
}
} // namespace anonymous
extern const std::map<std::string, std::string>&
QueryBoosterConfigurationArguments(BoosterHandle handle);
namespace xgboost {
namespace predictor {
TEST(gpu_predictor, Test) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor"));
gpu_predictor->Init({}, {});
cpu_predictor->Init({}, {});
gbm::GBTreeModel model = CreateTestModel();
int n_row = 5;
int n_col = 5;
auto dmat = CreateDMatrix(n_row, n_col, 0);
// Test predict batch
HostDeviceVector<float> gpu_out_predictions;
HostDeviceVector<float> cpu_out_predictions;
gpu_predictor->PredictBatch((*dmat).get(), &gpu_out_predictions, model, 0);
cpu_predictor->PredictBatch((*dmat).get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.HostVector();
float abs_tolerance = 0.001;
for (int i = 0; i < gpu_out_predictions.Size(); i++) {
ASSERT_NEAR(gpu_out_predictions_h[i], cpu_out_predictions_h[i], abs_tolerance);
}
// Test predict instance
const auto &batch = *(*dmat)->GetRowBatches().begin();
for (int i = 0; i < batch.Size(); i++) {
std::vector<float> gpu_instance_out_predictions;
std::vector<float> cpu_instance_out_predictions;
cpu_predictor->PredictInstance(batch[i], &cpu_instance_out_predictions,
model);
gpu_predictor->PredictInstance(batch[i], &gpu_instance_out_predictions,
model);
ASSERT_EQ(gpu_instance_out_predictions[0], cpu_instance_out_predictions[0]);
}
// Test predict leaf
std::vector<float> gpu_leaf_out_predictions;
std::vector<float> cpu_leaf_out_predictions;
cpu_predictor->PredictLeaf((*dmat).get(), &cpu_leaf_out_predictions, model);
gpu_predictor->PredictLeaf((*dmat).get(), &gpu_leaf_out_predictions, model);
for (int i = 0; i < gpu_leaf_out_predictions.size(); i++) {
ASSERT_EQ(gpu_leaf_out_predictions[i], cpu_leaf_out_predictions[i]);
}
// Test predict contribution
std::vector<float> gpu_out_contribution;
std::vector<float> cpu_out_contribution;
cpu_predictor->PredictContribution((*dmat).get(), &cpu_out_contribution, model);
gpu_predictor->PredictContribution((*dmat).get(), &gpu_out_contribution, model);
for (int i = 0; i < gpu_out_contribution.size(); i++) {
ASSERT_EQ(gpu_out_contribution[i], cpu_out_contribution[i]);
}
delete dmat;
}
TEST(gpu_predictor, ExternalMemoryTest) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
gpu_predictor->Init({}, {});
gbm::GBTreeModel model = CreateTestModel();
std::unique_ptr<DMatrix> dmat = CreateSparsePageDMatrix(32, 64);
// Test predict batch
HostDeviceVector<float> out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.Size(), dmat->Info().num_row_);
for (const auto& v : out_predictions.HostVector()) {
ASSERT_EQ(v, 1.5);
}
// Test predict leaf
std::vector<float> leaf_out_predictions;
gpu_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
EXPECT_EQ(leaf_out_predictions.size(), dmat->Info().num_row_);
for (const auto& v : leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
// Test predict contribution
std::vector<float> out_contribution;
gpu_predictor->PredictContribution(dmat.get(), &out_contribution, model);
EXPECT_EQ(out_contribution.size(), dmat->Info().num_row_);
for (const auto& v : out_contribution) {
ASSERT_EQ(v, 1.5);
}
// Test predict contribution (approximate method)
std::vector<float> out_contribution_approximate;
gpu_predictor->PredictContribution(dmat.get(), &out_contribution_approximate, model, true);
EXPECT_EQ(out_contribution_approximate.size(), dmat->Info().num_row_);
for (const auto& v : out_contribution_approximate) {
ASSERT_EQ(v, 1.5);
}
}
#if defined(XGBOOST_USE_NCCL)
// Test whether pickling preserves predictor parameters
TEST(gpu_predictor, MGPU_PicklingTest) {
int ngpu;
dh::safe_cuda(hipGetDeviceCount(&ngpu));
dmlc::TemporaryDirectory tempdir;
const std::string tmp_file = tempdir.path + "/simple.libsvm";
CreateBigTestData(tmp_file, 600);
DMatrixHandle dmat[1];
BoosterHandle bst, bst2;
std::vector<bst_float> label;
for (int i = 0; i < 200; ++i) {
label.push_back((i % 2 ? 1 : 0));
}
// Load data matrix
CheckCAPICall(XGDMatrixCreateFromFile(tmp_file.c_str(), 0, &dmat[0]));
CheckCAPICall(XGDMatrixSetFloatInfo(dmat[0], "label", label.data(), 200));
// Create booster
CheckCAPICall(XGBoosterCreate(dmat, 1, &bst));
// Set parameters
CheckCAPICall(XGBoosterSetParam(bst, "seed", "0"));
CheckCAPICall(XGBoosterSetParam(bst, "base_score", "0.5"));
CheckCAPICall(XGBoosterSetParam(bst, "booster", "gbtree"));
CheckCAPICall(XGBoosterSetParam(bst, "learning_rate", "0.01"));
CheckCAPICall(XGBoosterSetParam(bst, "max_depth", "8"));
CheckCAPICall(XGBoosterSetParam(bst, "objective", "binary:logistic"));
CheckCAPICall(XGBoosterSetParam(bst, "seed", "123"));
CheckCAPICall(XGBoosterSetParam(bst, "tree_method", "gpu_hist"));
CheckCAPICall(XGBoosterSetParam(bst, "n_gpus", std::to_string(ngpu).c_str()));
CheckCAPICall(XGBoosterSetParam(bst, "predictor", "gpu_predictor"));
// Run boosting iterations
for (int i = 0; i < 10; ++i) {
CheckCAPICall(XGBoosterUpdateOneIter(bst, i, dmat[0]));
}
// Delete matrix
CheckCAPICall(XGDMatrixFree(dmat[0]));
// Pickle
const char* dptr;
bst_ulong len;
std::string buf;
CheckCAPICall(XGBoosterGetModelRaw(bst, &len, &dptr));
buf = std::string(dptr, len);
CheckCAPICall(XGBoosterFree(bst));
// Unpickle
CheckCAPICall(XGBoosterCreate(nullptr, 0, &bst2));
CheckCAPICall(XGBoosterLoadModelFromBuffer(bst2, buf.c_str(), len));
{ // Query predictor
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("predictor"), "gpu_predictor");
ASSERT_EQ(kwargs.at("n_gpus"), std::to_string(ngpu).c_str());
}
{ // Change n_gpus and query again
CheckCAPICall(XGBoosterSetParam(bst2, "n_gpus", "1"));
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("n_gpus"), "1");
}
{ // Change predictor and query again
CheckCAPICall(XGBoosterSetParam(bst2, "predictor", "cpu_predictor"));
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("predictor"), "cpu_predictor");
}
CheckCAPICall(XGBoosterFree(bst2));
}
// multi-GPU predictor test
TEST(gpu_predictor, MGPU_Test) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor"));
gpu_predictor->Init({std::pair<std::string, std::string>("n_gpus", "-1")}, {});
cpu_predictor->Init({}, {});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = CreateDMatrix(n_row, n_col, 0);
gbm::GBTreeModel model = CreateTestModel();
// Test predict batch
HostDeviceVector<float> gpu_out_predictions;
HostDeviceVector<float> cpu_out_predictions;
gpu_predictor->PredictBatch((*dmat).get(), &gpu_out_predictions, model, 0);
cpu_predictor->PredictBatch((*dmat).get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.HostVector();
float abs_tolerance = 0.001;
for (int j = 0; j < gpu_out_predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
delete dmat;
}
}
// multi-GPU predictor external memory test
TEST(gpu_predictor, MGPU_ExternalMemoryTest) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
gpu_predictor->Init({std::pair<std::string, std::string>("n_gpus", "-1")}, {});
gbm::GBTreeModel model = CreateTestModel();
const int n_classes = 3;
model.param.num_output_group = n_classes;
std::vector<std::unique_ptr<DMatrix>> dmats;
dmats.push_back(CreateSparsePageDMatrix(9, 64UL));
dmats.push_back(CreateSparsePageDMatrix(128, 128UL));
dmats.push_back(CreateSparsePageDMatrix(1024, 1024UL));
for (const auto& dmat: dmats) {
// Test predict batch
HostDeviceVector<float> out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.Size(), dmat->Info().num_row_ * n_classes);
const std::vector<float> &host_vector = out_predictions.ConstHostVector();
for (int i = 0; i < host_vector.size() / n_classes; i++) {
ASSERT_EQ(host_vector[i * n_classes], 1.5);
ASSERT_EQ(host_vector[i * n_classes + 1], 0.);
ASSERT_EQ(host_vector[i * n_classes + 2], 0.);
}
}
}
#endif // defined(XGBOOST_USE_NCCL)
} // namespace predictor
} // namespace xgboost
| 60746bc1e2343dc00eb7888d219fc65e4bcf6c9f.cu |
/*!
* Copyright 2017 XGBoost contributors
*/
#include <dmlc/logging.h>
#include <dmlc/filesystem.h>
#include <xgboost/c_api.h>
#include <xgboost/predictor.h>
#include <string>
#include "gtest/gtest.h"
#include "../helpers.h"
namespace {
inline void CheckCAPICall(int ret) {
ASSERT_EQ(ret, 0) << XGBGetLastError();
}
} // namespace anonymous
extern const std::map<std::string, std::string>&
QueryBoosterConfigurationArguments(BoosterHandle handle);
namespace xgboost {
namespace predictor {
TEST(gpu_predictor, Test) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor"));
gpu_predictor->Init({}, {});
cpu_predictor->Init({}, {});
gbm::GBTreeModel model = CreateTestModel();
int n_row = 5;
int n_col = 5;
auto dmat = CreateDMatrix(n_row, n_col, 0);
// Test predict batch
HostDeviceVector<float> gpu_out_predictions;
HostDeviceVector<float> cpu_out_predictions;
gpu_predictor->PredictBatch((*dmat).get(), &gpu_out_predictions, model, 0);
cpu_predictor->PredictBatch((*dmat).get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.HostVector();
float abs_tolerance = 0.001;
for (int i = 0; i < gpu_out_predictions.Size(); i++) {
ASSERT_NEAR(gpu_out_predictions_h[i], cpu_out_predictions_h[i], abs_tolerance);
}
// Test predict instance
const auto &batch = *(*dmat)->GetRowBatches().begin();
for (int i = 0; i < batch.Size(); i++) {
std::vector<float> gpu_instance_out_predictions;
std::vector<float> cpu_instance_out_predictions;
cpu_predictor->PredictInstance(batch[i], &cpu_instance_out_predictions,
model);
gpu_predictor->PredictInstance(batch[i], &gpu_instance_out_predictions,
model);
ASSERT_EQ(gpu_instance_out_predictions[0], cpu_instance_out_predictions[0]);
}
// Test predict leaf
std::vector<float> gpu_leaf_out_predictions;
std::vector<float> cpu_leaf_out_predictions;
cpu_predictor->PredictLeaf((*dmat).get(), &cpu_leaf_out_predictions, model);
gpu_predictor->PredictLeaf((*dmat).get(), &gpu_leaf_out_predictions, model);
for (int i = 0; i < gpu_leaf_out_predictions.size(); i++) {
ASSERT_EQ(gpu_leaf_out_predictions[i], cpu_leaf_out_predictions[i]);
}
// Test predict contribution
std::vector<float> gpu_out_contribution;
std::vector<float> cpu_out_contribution;
cpu_predictor->PredictContribution((*dmat).get(), &cpu_out_contribution, model);
gpu_predictor->PredictContribution((*dmat).get(), &gpu_out_contribution, model);
for (int i = 0; i < gpu_out_contribution.size(); i++) {
ASSERT_EQ(gpu_out_contribution[i], cpu_out_contribution[i]);
}
delete dmat;
}
TEST(gpu_predictor, ExternalMemoryTest) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
gpu_predictor->Init({}, {});
gbm::GBTreeModel model = CreateTestModel();
std::unique_ptr<DMatrix> dmat = CreateSparsePageDMatrix(32, 64);
// Test predict batch
HostDeviceVector<float> out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.Size(), dmat->Info().num_row_);
for (const auto& v : out_predictions.HostVector()) {
ASSERT_EQ(v, 1.5);
}
// Test predict leaf
std::vector<float> leaf_out_predictions;
gpu_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
EXPECT_EQ(leaf_out_predictions.size(), dmat->Info().num_row_);
for (const auto& v : leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
// Test predict contribution
std::vector<float> out_contribution;
gpu_predictor->PredictContribution(dmat.get(), &out_contribution, model);
EXPECT_EQ(out_contribution.size(), dmat->Info().num_row_);
for (const auto& v : out_contribution) {
ASSERT_EQ(v, 1.5);
}
// Test predict contribution (approximate method)
std::vector<float> out_contribution_approximate;
gpu_predictor->PredictContribution(dmat.get(), &out_contribution_approximate, model, true);
EXPECT_EQ(out_contribution_approximate.size(), dmat->Info().num_row_);
for (const auto& v : out_contribution_approximate) {
ASSERT_EQ(v, 1.5);
}
}
#if defined(XGBOOST_USE_NCCL)
// Test whether pickling preserves predictor parameters
TEST(gpu_predictor, MGPU_PicklingTest) {
int ngpu;
dh::safe_cuda(cudaGetDeviceCount(&ngpu));
dmlc::TemporaryDirectory tempdir;
const std::string tmp_file = tempdir.path + "/simple.libsvm";
CreateBigTestData(tmp_file, 600);
DMatrixHandle dmat[1];
BoosterHandle bst, bst2;
std::vector<bst_float> label;
for (int i = 0; i < 200; ++i) {
label.push_back((i % 2 ? 1 : 0));
}
// Load data matrix
CheckCAPICall(XGDMatrixCreateFromFile(tmp_file.c_str(), 0, &dmat[0]));
CheckCAPICall(XGDMatrixSetFloatInfo(dmat[0], "label", label.data(), 200));
// Create booster
CheckCAPICall(XGBoosterCreate(dmat, 1, &bst));
// Set parameters
CheckCAPICall(XGBoosterSetParam(bst, "seed", "0"));
CheckCAPICall(XGBoosterSetParam(bst, "base_score", "0.5"));
CheckCAPICall(XGBoosterSetParam(bst, "booster", "gbtree"));
CheckCAPICall(XGBoosterSetParam(bst, "learning_rate", "0.01"));
CheckCAPICall(XGBoosterSetParam(bst, "max_depth", "8"));
CheckCAPICall(XGBoosterSetParam(bst, "objective", "binary:logistic"));
CheckCAPICall(XGBoosterSetParam(bst, "seed", "123"));
CheckCAPICall(XGBoosterSetParam(bst, "tree_method", "gpu_hist"));
CheckCAPICall(XGBoosterSetParam(bst, "n_gpus", std::to_string(ngpu).c_str()));
CheckCAPICall(XGBoosterSetParam(bst, "predictor", "gpu_predictor"));
// Run boosting iterations
for (int i = 0; i < 10; ++i) {
CheckCAPICall(XGBoosterUpdateOneIter(bst, i, dmat[0]));
}
// Delete matrix
CheckCAPICall(XGDMatrixFree(dmat[0]));
// Pickle
const char* dptr;
bst_ulong len;
std::string buf;
CheckCAPICall(XGBoosterGetModelRaw(bst, &len, &dptr));
buf = std::string(dptr, len);
CheckCAPICall(XGBoosterFree(bst));
// Unpickle
CheckCAPICall(XGBoosterCreate(nullptr, 0, &bst2));
CheckCAPICall(XGBoosterLoadModelFromBuffer(bst2, buf.c_str(), len));
{ // Query predictor
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("predictor"), "gpu_predictor");
ASSERT_EQ(kwargs.at("n_gpus"), std::to_string(ngpu).c_str());
}
{ // Change n_gpus and query again
CheckCAPICall(XGBoosterSetParam(bst2, "n_gpus", "1"));
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("n_gpus"), "1");
}
{ // Change predictor and query again
CheckCAPICall(XGBoosterSetParam(bst2, "predictor", "cpu_predictor"));
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("predictor"), "cpu_predictor");
}
CheckCAPICall(XGBoosterFree(bst2));
}
// multi-GPU predictor test
TEST(gpu_predictor, MGPU_Test) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor"));
gpu_predictor->Init({std::pair<std::string, std::string>("n_gpus", "-1")}, {});
cpu_predictor->Init({}, {});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = CreateDMatrix(n_row, n_col, 0);
gbm::GBTreeModel model = CreateTestModel();
// Test predict batch
HostDeviceVector<float> gpu_out_predictions;
HostDeviceVector<float> cpu_out_predictions;
gpu_predictor->PredictBatch((*dmat).get(), &gpu_out_predictions, model, 0);
cpu_predictor->PredictBatch((*dmat).get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.HostVector();
float abs_tolerance = 0.001;
for (int j = 0; j < gpu_out_predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
delete dmat;
}
}
// multi-GPU predictor external memory test
TEST(gpu_predictor, MGPU_ExternalMemoryTest) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
gpu_predictor->Init({std::pair<std::string, std::string>("n_gpus", "-1")}, {});
gbm::GBTreeModel model = CreateTestModel();
const int n_classes = 3;
model.param.num_output_group = n_classes;
std::vector<std::unique_ptr<DMatrix>> dmats;
dmats.push_back(CreateSparsePageDMatrix(9, 64UL));
dmats.push_back(CreateSparsePageDMatrix(128, 128UL));
dmats.push_back(CreateSparsePageDMatrix(1024, 1024UL));
for (const auto& dmat: dmats) {
// Test predict batch
HostDeviceVector<float> out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.Size(), dmat->Info().num_row_ * n_classes);
const std::vector<float> &host_vector = out_predictions.ConstHostVector();
for (int i = 0; i < host_vector.size() / n_classes; i++) {
ASSERT_EQ(host_vector[i * n_classes], 1.5);
ASSERT_EQ(host_vector[i * n_classes + 1], 0.);
ASSERT_EQ(host_vector[i * n_classes + 2], 0.);
}
}
}
#endif // defined(XGBOOST_USE_NCCL)
} // namespace predictor
} // namespace xgboost
|
f07ee25802ce1e0901cf1bda6bb83d2f77a97d26.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef _MARCHING_CUBES_KERNEL_CU_
#define _MARCHING_CUBES_KERNEL_CU_
#include <stdio.h>
#include <string.h>
#include <cutil_inline.h> // includes cuda.h and hip/hip_runtime_api.h
#include <hip/hip_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include "cutil_math.h"
#include "defines.h"
#include "tables.h"
// textures containing look-up tables
texture<uint, 1, hipReadModeElementType> edgeTex;
texture<uint, 1, hipReadModeElementType> triTex;
texture<uint, 1, hipReadModeElementType> numVertsTex;
// volume data
texture<uchar, 1, hipReadModeNormalizedFloat> volumeTex;
extern "C"
void allocateTextures( uint **d_edgeTable, uint **d_triTable, uint **d_numVertsTable )
{
cutilSafeCall(hipMalloc((void**) d_edgeTable, 256*sizeof(uint)));
cutilSafeCall(hipMemcpy((void *)*d_edgeTable, (void *)edgeTable, 256*sizeof(uint), hipMemcpyHostToDevice) );
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindUnsigned);
cutilSafeCall(hipBindTexture(0, edgeTex, *d_edgeTable, channelDesc) );
cutilSafeCall(hipMalloc((void**) d_triTable, 256*16*sizeof(uint)));
cutilSafeCall(hipMemcpy((void *)*d_triTable, (void *)triTable, 256*16*sizeof(uint), hipMemcpyHostToDevice) );
cutilSafeCall(hipBindTexture(0, triTex, *d_triTable, channelDesc) );
cutilSafeCall(hipMalloc((void**) d_numVertsTable, 256*sizeof(uint)));
cutilSafeCall(hipMemcpy((void *)*d_numVertsTable, (void *)numVertsTable, 256*sizeof(uint), hipMemcpyHostToDevice) );
cutilSafeCall(hipBindTexture(0, numVertsTex, *d_numVertsTable, channelDesc) );
}
extern "C"
void bindVolumeTexture(uchar *d_volume)
{
// bind to linear texture
cutilSafeCall(hipBindTexture(0, volumeTex, d_volume, hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned)));
}
// an interesting field function
__device__
float tangle(float x, float y, float z)
{
x *= 3.0f;
y *= 3.0f;
z *= 3.0f;
return (x*x*x*x - 5.0f*x*x +y*y*y*y - 5.0f*y*y +z*z*z*z - 5.0f*z*z + 11.8f) * 0.2f + 0.5f;
}
// evaluate field function at point
__device__
float fieldFunc(float3 p)
{
return tangle(p.x, p.y, p.z);
}
// evaluate field function at a point
// returns value and gradient in float4
__device__
float4 fieldFunc4(float3 p)
{
float v = tangle(p.x, p.y, p.z);
const float d = 0.001f;
float dx = tangle(p.x + d, p.y, p.z) - v;
float dy = tangle(p.x, p.y + d, p.z) - v;
float dz = tangle(p.x, p.y, p.z + d) - v;
return make_float4(dx, dy, dz, v);
}
// sample volume data set at a point
__device__
float sampleVolume(uchar *data, uint3 p, uint3 gridSize)
{
p.x = min(p.x, gridSize.x - 1);
p.y = min(p.y, gridSize.y - 1);
p.z = min(p.z, gridSize.z - 1);
uint i = (p.z*gridSize.x*gridSize.y) + (p.y*gridSize.x) + p.x;
// return (float) data[i] / 255.0f;
return tex1Dfetch(volumeTex, i);
}
// compute position in 3d grid from 1d index
// only works for power of 2 sizes
__device__
uint3 calcGridPos(uint i, uint3 gridSizeShift, uint3 gridSizeMask)
{
uint3 gridPos;
gridPos.x = i & gridSizeMask.x;
gridPos.y = (i >> gridSizeShift.y) & gridSizeMask.y;
gridPos.z = (i >> gridSizeShift.z) & gridSizeMask.z;
return gridPos;
}
// classify voxel based on number of vertices it will generate
// one thread per voxel
// Outputs, per voxel i: voxelVerts[i] = vertex count from the marching-cubes
// lookup table, voxelOccupied[i] = 1 if the voxel produces any geometry.
// Launch: 2D grid of 1D blocks; the flat thread id is the voxel index.
// Requires numVertsTex to be bound (see allocateTextures).
__global__ void
classifyVoxel(uint* voxelVerts, uint *voxelOccupied, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask, uint numVoxels,
float3 voxelSize, float isoValue)
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
// NOTE(review): threads with i >= numVoxels still sample below; this relies
// on calcGridPos masking the index and on the sampler clamping to the grid
// edge, so no out-of-bounds access occurs. Only the writes are guarded.
uint3 gridPos = calcGridPos(i, gridSizeShift, gridSizeMask);
// read field values at the 8 corners of this voxel's cube
#if SAMPLE_VOLUME
float field[8];
field[0] = sampleVolume(volume, gridPos, gridSize);
field[1] = sampleVolume(volume, gridPos + make_uint3(1, 0, 0), gridSize);
field[2] = sampleVolume(volume, gridPos + make_uint3(1, 1, 0), gridSize);
field[3] = sampleVolume(volume, gridPos + make_uint3(0, 1, 0), gridSize);
field[4] = sampleVolume(volume, gridPos + make_uint3(0, 0, 1), gridSize);
field[5] = sampleVolume(volume, gridPos + make_uint3(1, 0, 1), gridSize);
field[6] = sampleVolume(volume, gridPos + make_uint3(1, 1, 1), gridSize);
field[7] = sampleVolume(volume, gridPos + make_uint3(0, 1, 1), gridSize);
#else
// Procedural field: map grid coordinates into [-1,1]^3 and evaluate.
float3 p;
p.x = -1.0f + (gridPos.x * voxelSize.x);
p.y = -1.0f + (gridPos.y * voxelSize.y);
p.z = -1.0f + (gridPos.z * voxelSize.z);
float field[8];
field[0] = fieldFunc(p);
field[1] = fieldFunc(p + make_float3(voxelSize.x, 0, 0));
field[2] = fieldFunc(p + make_float3(voxelSize.x, voxelSize.y, 0));
field[3] = fieldFunc(p + make_float3(0, voxelSize.y, 0));
field[4] = fieldFunc(p + make_float3(0, 0, voxelSize.z));
field[5] = fieldFunc(p + make_float3(voxelSize.x, 0, voxelSize.z));
field[6] = fieldFunc(p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z));
field[7] = fieldFunc(p + make_float3(0, voxelSize.y, voxelSize.z));
#endif
// calculate flag indicating if each vertex is inside or outside isosurface
// (8-bit case index for the marching-cubes tables, one bit per corner)
uint cubeindex;
cubeindex = uint(field[0] < isoValue);
cubeindex += uint(field[1] < isoValue)*2;
cubeindex += uint(field[2] < isoValue)*4;
cubeindex += uint(field[3] < isoValue)*8;
cubeindex += uint(field[4] < isoValue)*16;
cubeindex += uint(field[5] < isoValue)*32;
cubeindex += uint(field[6] < isoValue)*64;
cubeindex += uint(field[7] < isoValue)*128;
// read number of vertices from texture
uint numVerts = tex1Dfetch(numVertsTex, cubeindex);
if (i < numVoxels) {
voxelVerts[i] = numVerts;
voxelOccupied[i] = (numVerts > 0);
}
}
// Host wrapper: launch classifyVoxel with the caller-supplied grid/block
// configuration, then check for launch errors via cutilCheckMsg.
extern "C" void
launch_classifyVoxel( dim3 grid, dim3 threads, uint* voxelVerts, uint *voxelOccupied, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask, uint numVoxels,
float3 voxelSize, float isoValue)
{
// calculate number of vertices need per voxel
hipLaunchKernelGGL(( classifyVoxel), dim3(grid), dim3(threads), 0, 0, voxelVerts, voxelOccupied, volume,
gridSize, gridSizeShift, gridSizeMask,
numVoxels, voxelSize, isoValue);
cutilCheckMsg("classifyVoxel failed");
}
// compact voxel array
// Scatter the index of every occupied voxel into a dense output array,
// using the exclusive-scan result as each voxel's destination slot.
// One thread per voxel; the launch grid may be padded beyond numVoxels.
__global__ void
compactVoxels(uint *compactedVoxelArray, uint *voxelOccupied, uint *voxelOccupiedScan, uint numVoxels)
{
    uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
    // Bounds test must come first: the previous code evaluated
    // voxelOccupied[i] before checking i < numVoxels, an out-of-bounds
    // read for the padding threads of the final block.
    if ((i < numVoxels) && voxelOccupied[i]) {
        compactedVoxelArray[ voxelOccupiedScan[i] ] = i;
    }
}
// Host wrapper: launch compactVoxels and check for launch errors.
extern "C" void
launch_compactVoxels(dim3 grid, dim3 threads, uint *compactedVoxelArray, uint *voxelOccupied, uint *voxelOccupiedScan, uint numVoxels)
{
hipLaunchKernelGGL(( compactVoxels), dim3(grid), dim3(threads), 0, 0, compactedVoxelArray, voxelOccupied,
voxelOccupiedScan, numVoxels);
cutilCheckMsg("compactVoxels failed");
}
// Locate the isosurface crossing on the edge p0->p1 by linearly
// interpolating between the two sampled field values f0 and f1.
// The triangle table only selects edges whose endpoints straddle the
// isovalue, so f1 - f0 is nonzero for edges actually used.
__device__
float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1)
{
    const float frac = (isolevel - f0) / (f1 - f0);
    return lerp(p0, p1, frac);
}
// Interpolate both the vertex position and the (unnormalized) gradient
// normal along edge p0->p1. f0/f1 pack the gradient in .xyz and the
// field value in .w; results are written through the p and n references.
__device__
void vertexInterp2(float isolevel, float3 p0, float3 p1, float4 f0, float4 f1, float3 &p, float3 &n)
{
    const float frac = (isolevel - f0.w) / (f1.w - f0.w);
    p = lerp(p0, p1, frac);
    n.x = lerp(f0.x, f1.x, frac);
    n.y = lerp(f0.y, f1.y, frac);
    n.z = lerp(f0.z, f1.z, frac);
    // Normalization is deliberately skipped; it is cheaper in the vertex shader.
}
// generate triangles for each voxel using marching cubes
// interpolates normals from field function
// One thread per active (compacted) voxel. Emits up to 15 vertices per
// voxel into pos/norm at offsets given by the exclusive scan numVertsScanned.
// When USE_SHARED is set, blockDim.x must equal NTHREADS: the shared arrays
// are partitioned as [edge * NTHREADS + threadIdx.x].
// Requires numVertsTex and triTex to be bound.
__global__ void
generateTriangles(float4 *pos, float4 *norm, uint *compactedVoxelArray, uint *numVertsScanned,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask,
float3 voxelSize, float isoValue, uint activeVoxels, uint maxVerts)
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
if (i > activeVoxels - 1) {
// can't return here because of syncthreads()
// (clamp so excess threads redundantly process the last voxel; their
// writes are identical to the owner's, so the result is unchanged)
// NOTE(review): activeVoxels == 0 would underflow this clamp — confirm
// the host never launches in that case.
i = activeVoxels - 1;
}
#if SKIP_EMPTY_VOXELS
uint voxel = compactedVoxelArray[i];
#else
uint voxel = i;
#endif
// compute position in 3d grid
uint3 gridPos = calcGridPos(voxel, gridSizeShift, gridSizeMask);
// map grid coordinates into the [-1,1]^3 domain
float3 p;
p.x = -1.0f + (gridPos.x * voxelSize.x);
p.y = -1.0f + (gridPos.y * voxelSize.y);
p.z = -1.0f + (gridPos.z * voxelSize.z);
// calculate cell vertex positions
float3 v[8];
v[0] = p;
v[1] = p + make_float3(voxelSize.x, 0, 0);
v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0);
v[3] = p + make_float3(0, voxelSize.y, 0);
v[4] = p + make_float3(0, 0, voxelSize.z);
v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z);
v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z);
v[7] = p + make_float3(0, voxelSize.y, voxelSize.z);
// evaluate field values (value in .w, finite-difference gradient in .xyz)
float4 field[8];
field[0] = fieldFunc4(v[0]);
field[1] = fieldFunc4(v[1]);
field[2] = fieldFunc4(v[2]);
field[3] = fieldFunc4(v[3]);
field[4] = fieldFunc4(v[4]);
field[5] = fieldFunc4(v[5]);
field[6] = fieldFunc4(v[6]);
field[7] = fieldFunc4(v[7]);
// recalculate flag
// (this is faster than storing it in global memory)
uint cubeindex;
cubeindex = uint(field[0].w < isoValue);
cubeindex += uint(field[1].w < isoValue)*2;
cubeindex += uint(field[2].w < isoValue)*4;
cubeindex += uint(field[3].w < isoValue)*8;
cubeindex += uint(field[4].w < isoValue)*16;
cubeindex += uint(field[5].w < isoValue)*32;
cubeindex += uint(field[6].w < isoValue)*64;
cubeindex += uint(field[7].w < isoValue)*128;
// find the vertices where the surface intersects the cube
#if USE_SHARED
// use partitioned shared memory to avoid using local memory
__shared__ float3 vertlist[12*NTHREADS];
__shared__ float3 normlist[12*NTHREADS];
vertexInterp2(isoValue, v[0], v[1], field[0], field[1], vertlist[threadIdx.x], normlist[threadIdx.x]);
vertexInterp2(isoValue, v[1], v[2], field[1], field[2], vertlist[threadIdx.x+NTHREADS], normlist[threadIdx.x+NTHREADS]);
vertexInterp2(isoValue, v[2], v[3], field[2], field[3], vertlist[threadIdx.x+(NTHREADS*2)], normlist[threadIdx.x+(NTHREADS*2)]);
vertexInterp2(isoValue, v[3], v[0], field[3], field[0], vertlist[threadIdx.x+(NTHREADS*3)], normlist[threadIdx.x+(NTHREADS*3)]);
vertexInterp2(isoValue, v[4], v[5], field[4], field[5], vertlist[threadIdx.x+(NTHREADS*4)], normlist[threadIdx.x+(NTHREADS*4)]);
vertexInterp2(isoValue, v[5], v[6], field[5], field[6], vertlist[threadIdx.x+(NTHREADS*5)], normlist[threadIdx.x+(NTHREADS*5)]);
vertexInterp2(isoValue, v[6], v[7], field[6], field[7], vertlist[threadIdx.x+(NTHREADS*6)], normlist[threadIdx.x+(NTHREADS*6)]);
vertexInterp2(isoValue, v[7], v[4], field[7], field[4], vertlist[threadIdx.x+(NTHREADS*7)], normlist[threadIdx.x+(NTHREADS*7)]);
vertexInterp2(isoValue, v[0], v[4], field[0], field[4], vertlist[threadIdx.x+(NTHREADS*8)], normlist[threadIdx.x+(NTHREADS*8)]);
vertexInterp2(isoValue, v[1], v[5], field[1], field[5], vertlist[threadIdx.x+(NTHREADS*9)], normlist[threadIdx.x+(NTHREADS*9)]);
vertexInterp2(isoValue, v[2], v[6], field[2], field[6], vertlist[threadIdx.x+(NTHREADS*10)], normlist[threadIdx.x+(NTHREADS*10)]);
vertexInterp2(isoValue, v[3], v[7], field[3], field[7], vertlist[threadIdx.x+(NTHREADS*11)], normlist[threadIdx.x+(NTHREADS*11)]);
__syncthreads();
#else
float3 vertlist[12];
float3 normlist[12];
vertexInterp2(isoValue, v[0], v[1], field[0], field[1], vertlist[0], normlist[0]);
vertexInterp2(isoValue, v[1], v[2], field[1], field[2], vertlist[1], normlist[1]);
vertexInterp2(isoValue, v[2], v[3], field[2], field[3], vertlist[2], normlist[2]);
vertexInterp2(isoValue, v[3], v[0], field[3], field[0], vertlist[3], normlist[3]);
vertexInterp2(isoValue, v[4], v[5], field[4], field[5], vertlist[4], normlist[4]);
vertexInterp2(isoValue, v[5], v[6], field[5], field[6], vertlist[5], normlist[5]);
vertexInterp2(isoValue, v[6], v[7], field[6], field[7], vertlist[6], normlist[6]);
vertexInterp2(isoValue, v[7], v[4], field[7], field[4], vertlist[7], normlist[7]);
vertexInterp2(isoValue, v[0], v[4], field[0], field[4], vertlist[8], normlist[8]);
vertexInterp2(isoValue, v[1], v[5], field[1], field[5], vertlist[9], normlist[9]);
vertexInterp2(isoValue, v[2], v[6], field[2], field[6], vertlist[10], normlist[10]);
vertexInterp2(isoValue, v[3], v[7], field[3], field[7], vertlist[11], normlist[11]);
#endif
// output triangle vertices
uint numVerts = tex1Dfetch(numVertsTex, cubeindex);
// note: this loop counter shadows the outer voxel index 'i'
for(int i=0; i<numVerts; i++) {
uint edge = tex1Dfetch(triTex, cubeindex*16 + i);
uint index = numVertsScanned[voxel] + i;
if (index < maxVerts) {
#if USE_SHARED
pos[index] = make_float4(vertlist[(edge*NTHREADS)+threadIdx.x], 1.0f);
norm[index] = make_float4(normlist[(edge*NTHREADS)+threadIdx.x], 0.0f);
#else
pos[index] = make_float4(vertlist[edge], 1.0f);
norm[index] = make_float4(normlist[edge], 0.0f);
#endif
}
}
}
// Host wrapper: launch generateTriangles and check for launch errors.
// NOTE(review): the 'threads' parameter is ignored — the kernel is launched
// with NTHREADS because its shared-memory layout assumes blockDim.x == NTHREADS.
extern "C" void
launch_generateTriangles(dim3 grid, dim3 threads,
float4 *pos, float4 *norm, uint *compactedVoxelArray, uint *numVertsScanned,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask,
float3 voxelSize, float isoValue, uint activeVoxels, uint maxVerts)
{
hipLaunchKernelGGL(( generateTriangles), dim3(grid), dim3(NTHREADS), 0, 0, pos, norm,
compactedVoxelArray,
numVertsScanned,
gridSize, gridSizeShift, gridSizeMask,
voxelSize, isoValue, activeVoxels,
maxVerts);
cutilCheckMsg("generateTriangles failed");
}
// Flat (unnormalized) surface normal of the triangle (v0, v1, v2).
__device__
float3 calcNormal(float3 *v0, float3 *v1, float3 *v2)
{
    // note - it's faster to perform normalization in vertex shader rather than here
    const float3 a = *v1 - *v0;
    const float3 b = *v2 - *v0;
    return cross(a, b);
}
// version that calculates flat surface normal for each triangle
// One thread per active (compacted) voxel; emits whole triangles (3 vertices
// at a time) into pos/norm at offsets from the exclusive scan numVertsScanned.
// When USE_SHARED is set, blockDim.x must equal NTHREADS (shared-memory
// layout is [edge * NTHREADS + threadIdx.x]).
// Requires numVertsTex and triTex (and volumeTex via sampleVolume) bound.
__global__ void
generateTriangles2(float4 *pos, float4 *norm, uint *compactedVoxelArray, uint *numVertsScanned, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask,
float3 voxelSize, float isoValue, uint activeVoxels, uint maxVerts)
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
if (i > activeVoxels - 1) {
// can't return early because of __syncthreads(); clamp to the last voxel
// (redundant threads write the same data as the owner thread)
i = activeVoxels - 1;
}
#if SKIP_EMPTY_VOXELS
uint voxel = compactedVoxelArray[i];
#else
uint voxel = i;
#endif
// compute position in 3d grid
uint3 gridPos = calcGridPos(voxel, gridSizeShift, gridSizeMask);
// map grid coordinates into the [-1,1]^3 domain
float3 p;
p.x = -1.0f + (gridPos.x * voxelSize.x);
p.y = -1.0f + (gridPos.y * voxelSize.y);
p.z = -1.0f + (gridPos.z * voxelSize.z);
// calculate cell vertex positions
float3 v[8];
v[0] = p;
v[1] = p + make_float3(voxelSize.x, 0, 0);
v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0);
v[3] = p + make_float3(0, voxelSize.y, 0);
v[4] = p + make_float3(0, 0, voxelSize.z);
v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z);
v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z);
v[7] = p + make_float3(0, voxelSize.y, voxelSize.z);
#if SAMPLE_VOLUME
float field[8];
field[0] = sampleVolume(volume, gridPos, gridSize);
field[1] = sampleVolume(volume, gridPos + make_uint3(1, 0, 0), gridSize);
field[2] = sampleVolume(volume, gridPos + make_uint3(1, 1, 0), gridSize);
field[3] = sampleVolume(volume, gridPos + make_uint3(0, 1, 0), gridSize);
field[4] = sampleVolume(volume, gridPos + make_uint3(0, 0, 1), gridSize);
field[5] = sampleVolume(volume, gridPos + make_uint3(1, 0, 1), gridSize);
field[6] = sampleVolume(volume, gridPos + make_uint3(1, 1, 1), gridSize);
field[7] = sampleVolume(volume, gridPos + make_uint3(0, 1, 1), gridSize);
#else
// evaluate field values
float field[8];
field[0] = fieldFunc(v[0]);
field[1] = fieldFunc(v[1]);
field[2] = fieldFunc(v[2]);
field[3] = fieldFunc(v[3]);
field[4] = fieldFunc(v[4]);
field[5] = fieldFunc(v[5]);
field[6] = fieldFunc(v[6]);
field[7] = fieldFunc(v[7]);
#endif
// recalculate flag (8-bit marching-cubes case index, one bit per corner)
uint cubeindex;
cubeindex = uint(field[0] < isoValue);
cubeindex += uint(field[1] < isoValue)*2;
cubeindex += uint(field[2] < isoValue)*4;
cubeindex += uint(field[3] < isoValue)*8;
cubeindex += uint(field[4] < isoValue)*16;
cubeindex += uint(field[5] < isoValue)*32;
cubeindex += uint(field[6] < isoValue)*64;
cubeindex += uint(field[7] < isoValue)*128;
// find the vertices where the surface intersects the cube
#if USE_SHARED
// use shared memory to avoid using local
__shared__ float3 vertlist[12*NTHREADS];
vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]);
vertlist[NTHREADS+threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]);
vertlist[(NTHREADS*2)+threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]);
vertlist[(NTHREADS*3)+threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]);
vertlist[(NTHREADS*4)+threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]);
vertlist[(NTHREADS*5)+threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]);
vertlist[(NTHREADS*6)+threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]);
vertlist[(NTHREADS*7)+threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]);
vertlist[(NTHREADS*8)+threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]);
vertlist[(NTHREADS*9)+threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]);
vertlist[(NTHREADS*10)+threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]);
vertlist[(NTHREADS*11)+threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]);
__syncthreads();
#else
float3 vertlist[12];
vertlist[0] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]);
vertlist[1] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]);
vertlist[2] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]);
vertlist[3] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]);
vertlist[4] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]);
vertlist[5] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]);
vertlist[6] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]);
vertlist[7] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]);
vertlist[8] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]);
vertlist[9] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]);
vertlist[10] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]);
vertlist[11] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]);
#endif
// output triangle vertices
uint numVerts = tex1Dfetch(numVertsTex, cubeindex);
// note: this loop counter shadows the outer voxel index 'i'
for(int i=0; i<numVerts; i+=3) {
uint index = numVertsScanned[voxel] + i;
// pointers to the three edge vertices of this triangle
// (renamed from 'v' to avoid shadowing the corner array above)
float3 *tri[3];
uint edge;
edge = tex1Dfetch(triTex, (cubeindex*16) + i);
#if USE_SHARED
tri[0] = &vertlist[(edge*NTHREADS)+threadIdx.x];
#else
tri[0] = &vertlist[edge];
#endif
edge = tex1Dfetch(triTex, (cubeindex*16) + i + 1);
#if USE_SHARED
tri[1] = &vertlist[(edge*NTHREADS)+threadIdx.x];
#else
tri[1] = &vertlist[edge];
#endif
edge = tex1Dfetch(triTex, (cubeindex*16) + i + 2);
#if USE_SHARED
tri[2] = &vertlist[(edge*NTHREADS)+threadIdx.x];
#else
tri[2] = &vertlist[edge];
#endif
// calculate triangle surface normal
float3 n = calcNormal(tri[0], tri[1], tri[2]);
// The triangle occupies slots index..index+2, so it fits iff
// index + 2 < maxVerts. The previous test 'index < (maxVerts - 3)'
// dropped the last storable triangle and, for maxVerts < 3,
// underflowed (uint) and allowed out-of-bounds writes.
if (index + 2 < maxVerts) {
pos[index] = make_float4(*tri[0], 1.0f);
norm[index] = make_float4(n, 0.0f);
pos[index+1] = make_float4(*tri[1], 1.0f);
norm[index+1] = make_float4(n, 0.0f);
pos[index+2] = make_float4(*tri[2], 1.0f);
norm[index+2] = make_float4(n, 0.0f);
}
}
}
// Host wrapper: launch generateTriangles2 and check for launch errors.
// NOTE(review): the 'threads' parameter is ignored — the kernel is launched
// with NTHREADS because its shared-memory layout assumes blockDim.x == NTHREADS.
extern "C" void
launch_generateTriangles2(dim3 grid, dim3 threads,
float4 *pos, float4 *norm, uint *compactedVoxelArray, uint *numVertsScanned, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask,
float3 voxelSize, float isoValue, uint activeVoxels, uint maxVerts)
{
hipLaunchKernelGGL(( generateTriangles2), dim3(grid), dim3(NTHREADS), 0, 0, pos, norm,
compactedVoxelArray,
numVertsScanned, volume,
gridSize, gridSizeShift, gridSizeMask,
voxelSize, isoValue, activeVoxels,
maxVerts);
cutilCheckMsg("generateTriangles2 failed");
}
// Exclusive prefix sum over 'numElements' device uints (input -> output),
// implemented with Thrust. Both pointers must be device memory.
extern "C" void ThrustScanWrapper(unsigned int* output, unsigned int* input, unsigned int numElements)
{
thrust::exclusive_scan(thrust::device_ptr<unsigned int>(input),
thrust::device_ptr<unsigned int>(input + numElements),
thrust::device_ptr<unsigned int>(output));
}
#endif
| f07ee25802ce1e0901cf1bda6bb83d2f77a97d26.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef _MARCHING_CUBES_KERNEL_CU_
#define _MARCHING_CUBES_KERNEL_CU_
#include <stdio.h>
#include <string.h>
#include <cutil_inline.h> // includes cuda.h and cuda_runtime_api.h
#include <cuda_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include "cutil_math.h"
#include "defines.h"
#include "tables.h"
// textures containing look-up tables
texture<uint, 1, cudaReadModeElementType> edgeTex;
texture<uint, 1, cudaReadModeElementType> triTex;
texture<uint, 1, cudaReadModeElementType> numVertsTex;
// volume data
texture<uchar, 1, cudaReadModeNormalizedFloat> volumeTex;
// Allocate device copies of the marching-cubes lookup tables (edge table,
// triangle table, vertex-count table), upload them from the host arrays in
// tables.h, and bind each to its texture reference for tex1Dfetch access.
// The caller owns the returned device pointers.
extern "C"
void allocateTextures( uint **d_edgeTable, uint **d_triTable, uint **d_numVertsTable )
{
cutilSafeCall(cudaMalloc((void**) d_edgeTable, 256*sizeof(uint)));
cutilSafeCall(cudaMemcpy((void *)*d_edgeTable, (void *)edgeTable, 256*sizeof(uint), cudaMemcpyHostToDevice) );
// all three tables hold 32-bit unsigned ints
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindUnsigned);
cutilSafeCall(cudaBindTexture(0, edgeTex, *d_edgeTable, channelDesc) );
// triangle table: 256 cases x 16 edge indices
cutilSafeCall(cudaMalloc((void**) d_triTable, 256*16*sizeof(uint)));
cutilSafeCall(cudaMemcpy((void *)*d_triTable, (void *)triTable, 256*16*sizeof(uint), cudaMemcpyHostToDevice) );
cutilSafeCall(cudaBindTexture(0, triTex, *d_triTable, channelDesc) );
cutilSafeCall(cudaMalloc((void**) d_numVertsTable, 256*sizeof(uint)));
cutilSafeCall(cudaMemcpy((void *)*d_numVertsTable, (void *)numVertsTable, 256*sizeof(uint), cudaMemcpyHostToDevice) );
cutilSafeCall(cudaBindTexture(0, numVertsTex, *d_numVertsTable, channelDesc) );
}
// Bind the raw 8-bit volume data to volumeTex; the texture is declared with
// cudaReadModeNormalizedFloat, so fetches return values normalized to [0,1].
extern "C"
void bindVolumeTexture(uchar *d_volume)
{
// bind to linear texture
cutilSafeCall(cudaBindTexture(0, volumeTex, d_volume, cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned)));
}
// "Tangle" implicit-surface field: a quartic polynomial evaluated at the
// input point scaled by 3 (so the surface fits the unit cube domain).
__device__
float tangle(float x, float y, float z)
{
    const float sx = x * 3.0f;
    const float sy = y * 3.0f;
    const float sz = z * 3.0f;
    return (sx*sx*sx*sx - 5.0f*sx*sx +sy*sy*sy*sy - 5.0f*sy*sy +sz*sz*sz*sz - 5.0f*sz*sz + 11.8f) * 0.2f + 0.5f;
}
// Scalar field value at point p — thin wrapper that unpacks the float3
// components for tangle().
__device__
float fieldFunc(float3 p)
{
    const float value = tangle(p.x, p.y, p.z);
    return value;
}
// Field value plus gradient at point p, packed as float4:
// .xyz = forward-difference gradient estimate, .w = field value.
__device__
float4 fieldFunc4(float3 p)
{
    const float value = tangle(p.x, p.y, p.z);
    const float h = 0.001f;  // finite-difference step
    const float gx = tangle(p.x + h, p.y, p.z) - value;
    const float gy = tangle(p.x, p.y + h, p.z) - value;
    const float gz = tangle(p.x, p.y, p.z + h) - value;
    return make_float4(gx, gy, gz, value);
}
// sample volume data set at a point
// Coordinates are clamped to the grid edge, then the flattened index is
// fetched through volumeTex (normalized-float read mode, so the result is
// the 8-bit voxel value scaled into [0,1]).
__device__
float sampleVolume(uchar *data, uint3 p, uint3 gridSize)
{
p.x = min(p.x, gridSize.x - 1);
p.y = min(p.y, gridSize.y - 1);
p.z = min(p.z, gridSize.z - 1);
uint i = (p.z*gridSize.x*gridSize.y) + (p.y*gridSize.x) + p.x;
// alternative direct-memory path (texture fetch used instead); 'data' is
// unused while the texture path is active
// return (float) data[i] / 255.0f;
return tex1Dfetch(volumeTex, i);
}
// Decode a flat voxel index into (x, y, z) grid coordinates.
// Valid only for power-of-two grid dimensions: the host supplies bit
// shifts and masks derived from log2 of each dimension.
__device__
uint3 calcGridPos(uint i, uint3 gridSizeShift, uint3 gridSizeMask)
{
    return make_uint3(i & gridSizeMask.x,
                      (i >> gridSizeShift.y) & gridSizeMask.y,
                      (i >> gridSizeShift.z) & gridSizeMask.z);
}
// classify voxel based on number of vertices it will generate
// one thread per voxel
// Outputs, per voxel i: voxelVerts[i] = vertex count from the marching-cubes
// lookup table, voxelOccupied[i] = 1 if the voxel produces any geometry.
// Launch: 2D grid of 1D blocks; the flat thread id is the voxel index.
// Requires numVertsTex to be bound (see allocateTextures).
__global__ void
classifyVoxel(uint* voxelVerts, uint *voxelOccupied, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask, uint numVoxels,
float3 voxelSize, float isoValue)
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
// NOTE(review): threads with i >= numVoxels still sample below; this relies
// on calcGridPos masking the index and on sampleVolume clamping to the grid
// edge, so no out-of-bounds access occurs. Only the writes are guarded.
uint3 gridPos = calcGridPos(i, gridSizeShift, gridSizeMask);
// read field values at the 8 corners of this voxel's cube
#if SAMPLE_VOLUME
float field[8];
field[0] = sampleVolume(volume, gridPos, gridSize);
field[1] = sampleVolume(volume, gridPos + make_uint3(1, 0, 0), gridSize);
field[2] = sampleVolume(volume, gridPos + make_uint3(1, 1, 0), gridSize);
field[3] = sampleVolume(volume, gridPos + make_uint3(0, 1, 0), gridSize);
field[4] = sampleVolume(volume, gridPos + make_uint3(0, 0, 1), gridSize);
field[5] = sampleVolume(volume, gridPos + make_uint3(1, 0, 1), gridSize);
field[6] = sampleVolume(volume, gridPos + make_uint3(1, 1, 1), gridSize);
field[7] = sampleVolume(volume, gridPos + make_uint3(0, 1, 1), gridSize);
#else
// Procedural field: map grid coordinates into [-1,1]^3 and evaluate.
float3 p;
p.x = -1.0f + (gridPos.x * voxelSize.x);
p.y = -1.0f + (gridPos.y * voxelSize.y);
p.z = -1.0f + (gridPos.z * voxelSize.z);
float field[8];
field[0] = fieldFunc(p);
field[1] = fieldFunc(p + make_float3(voxelSize.x, 0, 0));
field[2] = fieldFunc(p + make_float3(voxelSize.x, voxelSize.y, 0));
field[3] = fieldFunc(p + make_float3(0, voxelSize.y, 0));
field[4] = fieldFunc(p + make_float3(0, 0, voxelSize.z));
field[5] = fieldFunc(p + make_float3(voxelSize.x, 0, voxelSize.z));
field[6] = fieldFunc(p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z));
field[7] = fieldFunc(p + make_float3(0, voxelSize.y, voxelSize.z));
#endif
// calculate flag indicating if each vertex is inside or outside isosurface
// (8-bit case index for the marching-cubes tables, one bit per corner)
uint cubeindex;
cubeindex = uint(field[0] < isoValue);
cubeindex += uint(field[1] < isoValue)*2;
cubeindex += uint(field[2] < isoValue)*4;
cubeindex += uint(field[3] < isoValue)*8;
cubeindex += uint(field[4] < isoValue)*16;
cubeindex += uint(field[5] < isoValue)*32;
cubeindex += uint(field[6] < isoValue)*64;
cubeindex += uint(field[7] < isoValue)*128;
// read number of vertices from texture
uint numVerts = tex1Dfetch(numVertsTex, cubeindex);
if (i < numVoxels) {
voxelVerts[i] = numVerts;
voxelOccupied[i] = (numVerts > 0);
}
}
// Host wrapper: launch classifyVoxel with the caller-supplied grid/block
// configuration, then check for launch errors via cutilCheckMsg.
extern "C" void
launch_classifyVoxel( dim3 grid, dim3 threads, uint* voxelVerts, uint *voxelOccupied, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask, uint numVoxels,
float3 voxelSize, float isoValue)
{
// calculate number of vertices need per voxel
classifyVoxel<<<grid, threads>>>(voxelVerts, voxelOccupied, volume,
gridSize, gridSizeShift, gridSizeMask,
numVoxels, voxelSize, isoValue);
cutilCheckMsg("classifyVoxel failed");
}
// compact voxel array
// Scatter the index of every occupied voxel into a dense output array,
// using the exclusive-scan result as each voxel's destination slot.
// One thread per voxel; the launch grid may be padded beyond numVoxels.
__global__ void
compactVoxels(uint *compactedVoxelArray, uint *voxelOccupied, uint *voxelOccupiedScan, uint numVoxels)
{
    uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
    // Bounds test must come first: the previous code evaluated
    // voxelOccupied[i] before checking i < numVoxels, an out-of-bounds
    // read for the padding threads of the final block.
    if ((i < numVoxels) && voxelOccupied[i]) {
        compactedVoxelArray[ voxelOccupiedScan[i] ] = i;
    }
}
// Host wrapper: launch compactVoxels and check for launch errors.
extern "C" void
launch_compactVoxels(dim3 grid, dim3 threads, uint *compactedVoxelArray, uint *voxelOccupied, uint *voxelOccupiedScan, uint numVoxels)
{
compactVoxels<<<grid, threads>>>(compactedVoxelArray, voxelOccupied,
voxelOccupiedScan, numVoxels);
cutilCheckMsg("compactVoxels failed");
}
// Locate the isosurface crossing on the edge p0->p1 by linearly
// interpolating between the two sampled field values f0 and f1.
// The triangle table only selects edges whose endpoints straddle the
// isovalue, so f1 - f0 is nonzero for edges actually used.
__device__
float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1)
{
    const float frac = (isolevel - f0) / (f1 - f0);
    return lerp(p0, p1, frac);
}
// Interpolate both the vertex position and the (unnormalized) gradient
// normal along edge p0->p1. f0/f1 pack the gradient in .xyz and the
// field value in .w; results are written through the p and n references.
__device__
void vertexInterp2(float isolevel, float3 p0, float3 p1, float4 f0, float4 f1, float3 &p, float3 &n)
{
    const float frac = (isolevel - f0.w) / (f1.w - f0.w);
    p = lerp(p0, p1, frac);
    n.x = lerp(f0.x, f1.x, frac);
    n.y = lerp(f0.y, f1.y, frac);
    n.z = lerp(f0.z, f1.z, frac);
    // Normalization is deliberately skipped; it is cheaper in the vertex shader.
}
// generate triangles for each voxel using marching cubes
// interpolates normals from field function
// One thread per active (compacted) voxel. Emits up to 15 vertices per
// voxel into pos/norm at offsets given by the exclusive scan numVertsScanned.
// When USE_SHARED is set, blockDim.x must equal NTHREADS: the shared arrays
// are partitioned as [edge * NTHREADS + threadIdx.x].
// Requires numVertsTex and triTex to be bound.
__global__ void
generateTriangles(float4 *pos, float4 *norm, uint *compactedVoxelArray, uint *numVertsScanned,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask,
float3 voxelSize, float isoValue, uint activeVoxels, uint maxVerts)
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
if (i > activeVoxels - 1) {
// can't return here because of syncthreads()
// (clamp so excess threads redundantly process the last voxel; their
// writes are identical to the owner's, so the result is unchanged)
// NOTE(review): activeVoxels == 0 would underflow this clamp — confirm
// the host never launches in that case.
i = activeVoxels - 1;
}
#if SKIP_EMPTY_VOXELS
uint voxel = compactedVoxelArray[i];
#else
uint voxel = i;
#endif
// compute position in 3d grid
uint3 gridPos = calcGridPos(voxel, gridSizeShift, gridSizeMask);
// map grid coordinates into the [-1,1]^3 domain
float3 p;
p.x = -1.0f + (gridPos.x * voxelSize.x);
p.y = -1.0f + (gridPos.y * voxelSize.y);
p.z = -1.0f + (gridPos.z * voxelSize.z);
// calculate cell vertex positions
float3 v[8];
v[0] = p;
v[1] = p + make_float3(voxelSize.x, 0, 0);
v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0);
v[3] = p + make_float3(0, voxelSize.y, 0);
v[4] = p + make_float3(0, 0, voxelSize.z);
v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z);
v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z);
v[7] = p + make_float3(0, voxelSize.y, voxelSize.z);
// evaluate field values (value in .w, finite-difference gradient in .xyz)
float4 field[8];
field[0] = fieldFunc4(v[0]);
field[1] = fieldFunc4(v[1]);
field[2] = fieldFunc4(v[2]);
field[3] = fieldFunc4(v[3]);
field[4] = fieldFunc4(v[4]);
field[5] = fieldFunc4(v[5]);
field[6] = fieldFunc4(v[6]);
field[7] = fieldFunc4(v[7]);
// recalculate flag
// (this is faster than storing it in global memory)
uint cubeindex;
cubeindex = uint(field[0].w < isoValue);
cubeindex += uint(field[1].w < isoValue)*2;
cubeindex += uint(field[2].w < isoValue)*4;
cubeindex += uint(field[3].w < isoValue)*8;
cubeindex += uint(field[4].w < isoValue)*16;
cubeindex += uint(field[5].w < isoValue)*32;
cubeindex += uint(field[6].w < isoValue)*64;
cubeindex += uint(field[7].w < isoValue)*128;
// find the vertices where the surface intersects the cube
#if USE_SHARED
// use partitioned shared memory to avoid using local memory
__shared__ float3 vertlist[12*NTHREADS];
__shared__ float3 normlist[12*NTHREADS];
vertexInterp2(isoValue, v[0], v[1], field[0], field[1], vertlist[threadIdx.x], normlist[threadIdx.x]);
vertexInterp2(isoValue, v[1], v[2], field[1], field[2], vertlist[threadIdx.x+NTHREADS], normlist[threadIdx.x+NTHREADS]);
vertexInterp2(isoValue, v[2], v[3], field[2], field[3], vertlist[threadIdx.x+(NTHREADS*2)], normlist[threadIdx.x+(NTHREADS*2)]);
vertexInterp2(isoValue, v[3], v[0], field[3], field[0], vertlist[threadIdx.x+(NTHREADS*3)], normlist[threadIdx.x+(NTHREADS*3)]);
vertexInterp2(isoValue, v[4], v[5], field[4], field[5], vertlist[threadIdx.x+(NTHREADS*4)], normlist[threadIdx.x+(NTHREADS*4)]);
vertexInterp2(isoValue, v[5], v[6], field[5], field[6], vertlist[threadIdx.x+(NTHREADS*5)], normlist[threadIdx.x+(NTHREADS*5)]);
vertexInterp2(isoValue, v[6], v[7], field[6], field[7], vertlist[threadIdx.x+(NTHREADS*6)], normlist[threadIdx.x+(NTHREADS*6)]);
vertexInterp2(isoValue, v[7], v[4], field[7], field[4], vertlist[threadIdx.x+(NTHREADS*7)], normlist[threadIdx.x+(NTHREADS*7)]);
vertexInterp2(isoValue, v[0], v[4], field[0], field[4], vertlist[threadIdx.x+(NTHREADS*8)], normlist[threadIdx.x+(NTHREADS*8)]);
vertexInterp2(isoValue, v[1], v[5], field[1], field[5], vertlist[threadIdx.x+(NTHREADS*9)], normlist[threadIdx.x+(NTHREADS*9)]);
vertexInterp2(isoValue, v[2], v[6], field[2], field[6], vertlist[threadIdx.x+(NTHREADS*10)], normlist[threadIdx.x+(NTHREADS*10)]);
vertexInterp2(isoValue, v[3], v[7], field[3], field[7], vertlist[threadIdx.x+(NTHREADS*11)], normlist[threadIdx.x+(NTHREADS*11)]);
__syncthreads();
#else
float3 vertlist[12];
float3 normlist[12];
vertexInterp2(isoValue, v[0], v[1], field[0], field[1], vertlist[0], normlist[0]);
vertexInterp2(isoValue, v[1], v[2], field[1], field[2], vertlist[1], normlist[1]);
vertexInterp2(isoValue, v[2], v[3], field[2], field[3], vertlist[2], normlist[2]);
vertexInterp2(isoValue, v[3], v[0], field[3], field[0], vertlist[3], normlist[3]);
vertexInterp2(isoValue, v[4], v[5], field[4], field[5], vertlist[4], normlist[4]);
vertexInterp2(isoValue, v[5], v[6], field[5], field[6], vertlist[5], normlist[5]);
vertexInterp2(isoValue, v[6], v[7], field[6], field[7], vertlist[6], normlist[6]);
vertexInterp2(isoValue, v[7], v[4], field[7], field[4], vertlist[7], normlist[7]);
vertexInterp2(isoValue, v[0], v[4], field[0], field[4], vertlist[8], normlist[8]);
vertexInterp2(isoValue, v[1], v[5], field[1], field[5], vertlist[9], normlist[9]);
vertexInterp2(isoValue, v[2], v[6], field[2], field[6], vertlist[10], normlist[10]);
vertexInterp2(isoValue, v[3], v[7], field[3], field[7], vertlist[11], normlist[11]);
#endif
// output triangle vertices
uint numVerts = tex1Dfetch(numVertsTex, cubeindex);
// note: this loop counter shadows the outer voxel index 'i'
for(int i=0; i<numVerts; i++) {
uint edge = tex1Dfetch(triTex, cubeindex*16 + i);
uint index = numVertsScanned[voxel] + i;
if (index < maxVerts) {
#if USE_SHARED
pos[index] = make_float4(vertlist[(edge*NTHREADS)+threadIdx.x], 1.0f);
norm[index] = make_float4(normlist[(edge*NTHREADS)+threadIdx.x], 0.0f);
#else
pos[index] = make_float4(vertlist[edge], 1.0f);
norm[index] = make_float4(normlist[edge], 0.0f);
#endif
}
}
}
// Host wrapper: launch generateTriangles and check for launch errors.
// NOTE(review): the 'threads' parameter is ignored — the kernel is launched
// with NTHREADS because its shared-memory layout assumes blockDim.x == NTHREADS.
extern "C" void
launch_generateTriangles(dim3 grid, dim3 threads,
float4 *pos, float4 *norm, uint *compactedVoxelArray, uint *numVertsScanned,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask,
float3 voxelSize, float isoValue, uint activeVoxels, uint maxVerts)
{
generateTriangles<<<grid, NTHREADS>>>(pos, norm,
compactedVoxelArray,
numVertsScanned,
gridSize, gridSizeShift, gridSizeMask,
voxelSize, isoValue, activeVoxels,
maxVerts);
cutilCheckMsg("generateTriangles failed");
}
// Flat (unnormalized) surface normal of the triangle (v0, v1, v2).
__device__
float3 calcNormal(float3 *v0, float3 *v1, float3 *v2)
{
    // note - it's faster to perform normalization in vertex shader rather than here
    const float3 a = *v1 - *v0;
    const float3 b = *v2 - *v0;
    return cross(a, b);
}
// version that calculates flat surface normal for each triangle
// One thread per active (compacted) voxel; emits whole triangles (3 vertices
// at a time) into pos/norm at offsets from the exclusive scan numVertsScanned.
// When USE_SHARED is set, blockDim.x must equal NTHREADS (shared-memory
// layout is [edge * NTHREADS + threadIdx.x]).
// Requires numVertsTex and triTex (and volumeTex via sampleVolume) bound.
__global__ void
generateTriangles2(float4 *pos, float4 *norm, uint *compactedVoxelArray, uint *numVertsScanned, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask,
float3 voxelSize, float isoValue, uint activeVoxels, uint maxVerts)
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
if (i > activeVoxels - 1) {
// can't return early because of __syncthreads(); clamp to the last voxel
// (redundant threads write the same data as the owner thread)
i = activeVoxels - 1;
}
#if SKIP_EMPTY_VOXELS
uint voxel = compactedVoxelArray[i];
#else
uint voxel = i;
#endif
// compute position in 3d grid
uint3 gridPos = calcGridPos(voxel, gridSizeShift, gridSizeMask);
// map grid coordinates into the [-1,1]^3 domain
float3 p;
p.x = -1.0f + (gridPos.x * voxelSize.x);
p.y = -1.0f + (gridPos.y * voxelSize.y);
p.z = -1.0f + (gridPos.z * voxelSize.z);
// calculate cell vertex positions
float3 v[8];
v[0] = p;
v[1] = p + make_float3(voxelSize.x, 0, 0);
v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0);
v[3] = p + make_float3(0, voxelSize.y, 0);
v[4] = p + make_float3(0, 0, voxelSize.z);
v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z);
v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z);
v[7] = p + make_float3(0, voxelSize.y, voxelSize.z);
#if SAMPLE_VOLUME
float field[8];
field[0] = sampleVolume(volume, gridPos, gridSize);
field[1] = sampleVolume(volume, gridPos + make_uint3(1, 0, 0), gridSize);
field[2] = sampleVolume(volume, gridPos + make_uint3(1, 1, 0), gridSize);
field[3] = sampleVolume(volume, gridPos + make_uint3(0, 1, 0), gridSize);
field[4] = sampleVolume(volume, gridPos + make_uint3(0, 0, 1), gridSize);
field[5] = sampleVolume(volume, gridPos + make_uint3(1, 0, 1), gridSize);
field[6] = sampleVolume(volume, gridPos + make_uint3(1, 1, 1), gridSize);
field[7] = sampleVolume(volume, gridPos + make_uint3(0, 1, 1), gridSize);
#else
// evaluate field values
float field[8];
field[0] = fieldFunc(v[0]);
field[1] = fieldFunc(v[1]);
field[2] = fieldFunc(v[2]);
field[3] = fieldFunc(v[3]);
field[4] = fieldFunc(v[4]);
field[5] = fieldFunc(v[5]);
field[6] = fieldFunc(v[6]);
field[7] = fieldFunc(v[7]);
#endif
// recalculate flag (8-bit marching-cubes case index, one bit per corner)
uint cubeindex;
cubeindex = uint(field[0] < isoValue);
cubeindex += uint(field[1] < isoValue)*2;
cubeindex += uint(field[2] < isoValue)*4;
cubeindex += uint(field[3] < isoValue)*8;
cubeindex += uint(field[4] < isoValue)*16;
cubeindex += uint(field[5] < isoValue)*32;
cubeindex += uint(field[6] < isoValue)*64;
cubeindex += uint(field[7] < isoValue)*128;
// find the vertices where the surface intersects the cube
#if USE_SHARED
// use shared memory to avoid using local
__shared__ float3 vertlist[12*NTHREADS];
vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]);
vertlist[NTHREADS+threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]);
vertlist[(NTHREADS*2)+threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]);
vertlist[(NTHREADS*3)+threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]);
vertlist[(NTHREADS*4)+threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]);
vertlist[(NTHREADS*5)+threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]);
vertlist[(NTHREADS*6)+threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]);
vertlist[(NTHREADS*7)+threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]);
vertlist[(NTHREADS*8)+threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]);
vertlist[(NTHREADS*9)+threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]);
vertlist[(NTHREADS*10)+threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]);
vertlist[(NTHREADS*11)+threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]);
__syncthreads();
#else
float3 vertlist[12];
vertlist[0] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]);
vertlist[1] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]);
vertlist[2] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]);
vertlist[3] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]);
vertlist[4] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]);
vertlist[5] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]);
vertlist[6] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]);
vertlist[7] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]);
vertlist[8] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]);
vertlist[9] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]);
vertlist[10] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]);
vertlist[11] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]);
#endif
// output triangle vertices
uint numVerts = tex1Dfetch(numVertsTex, cubeindex);
// note: this loop counter shadows the outer voxel index 'i'
for(int i=0; i<numVerts; i+=3) {
uint index = numVertsScanned[voxel] + i;
// pointers to the three edge vertices of this triangle
// (renamed from 'v' to avoid shadowing the corner array above)
float3 *tri[3];
uint edge;
edge = tex1Dfetch(triTex, (cubeindex*16) + i);
#if USE_SHARED
tri[0] = &vertlist[(edge*NTHREADS)+threadIdx.x];
#else
tri[0] = &vertlist[edge];
#endif
edge = tex1Dfetch(triTex, (cubeindex*16) + i + 1);
#if USE_SHARED
tri[1] = &vertlist[(edge*NTHREADS)+threadIdx.x];
#else
tri[1] = &vertlist[edge];
#endif
edge = tex1Dfetch(triTex, (cubeindex*16) + i + 2);
#if USE_SHARED
tri[2] = &vertlist[(edge*NTHREADS)+threadIdx.x];
#else
tri[2] = &vertlist[edge];
#endif
// calculate triangle surface normal
float3 n = calcNormal(tri[0], tri[1], tri[2]);
// The triangle occupies slots index..index+2, so it fits iff
// index + 2 < maxVerts. The previous test 'index < (maxVerts - 3)'
// dropped the last storable triangle and, for maxVerts < 3,
// underflowed (uint) and allowed out-of-bounds writes.
if (index + 2 < maxVerts) {
pos[index] = make_float4(*tri[0], 1.0f);
norm[index] = make_float4(n, 0.0f);
pos[index+1] = make_float4(*tri[1], 1.0f);
norm[index+1] = make_float4(n, 0.0f);
pos[index+2] = make_float4(*tri[2], 1.0f);
norm[index+2] = make_float4(n, 0.0f);
}
}
}
// Host-side wrapper that launches the generateTriangles2 kernel over the
// compacted list of occupied voxels and checks for launch errors.
// NOTE(review): the `threads` parameter is ignored — the kernel is launched
// with NTHREADS threads per block, because the kernel's shared-memory vertex
// list is statically sized by NTHREADS.  Confirm callers expect this.
extern "C" void
launch_generateTriangles2(dim3 grid, dim3 threads,
float4 *pos, float4 *norm, uint *compactedVoxelArray, uint *numVertsScanned, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask,
float3 voxelSize, float isoValue, uint activeVoxels, uint maxVerts)
{
// one thread per active voxel; block size fixed at NTHREADS (see note above)
generateTriangles2<<<grid, NTHREADS>>>(pos, norm,
compactedVoxelArray,
numVertsScanned, volume,
gridSize, gridSizeShift, gridSizeMask,
voxelSize, isoValue, activeVoxels,
maxVerts);
// surfaces any launch-configuration error from the kernel above
cutilCheckMsg("generateTriangles2 failed");
}
// Exclusive prefix sum over the device array [input, input + numElements),
// written to the device array at output (output[0] becomes 0).  Both pointers
// must reference device memory.
extern "C" void ThrustScanWrapper(unsigned int* output, unsigned int* input, unsigned int numElements)
{
    thrust::device_ptr<unsigned int> in_begin(input);
    thrust::device_ptr<unsigned int> in_end(input + numElements);
    thrust::device_ptr<unsigned int> out_begin(output);
    thrust::exclusive_scan(in_begin, in_end, out_begin);
}
#endif
|
487c65bdbd9168e5e7945e15a9a234c11ee7bddf.hip | // !!! This is a file automatically generated by hipify!!!
//#include "common.h"
#include "header.h"
#include <map>
#include <set>
using namespace std;
/* #define NO_OPT */
extern "C" {
static int is_init=0;
static map<int,set<void*> > free_list_gpu, free_list_host;
static map<void *,int> live_ptrs_gpu, live_ptrs_host;
// Release every cached device allocation back to the HIP runtime and empty
// the size-bucketed free list.
static void clearGpuFreeList() {
    for (map<int, set<void*> >::iterator bucket = free_list_gpu.begin();
         bucket != free_list_gpu.end(); ++bucket) {
        set<void*> &cached = bucket->second;
        for (set<void*>::iterator p = cached.begin(); p != cached.end(); ++p) {
            hipFree(*p);
        }
    }
    free_list_gpu.clear();
}
// Release every cached pinned-host allocation back to the HIP runtime and
// empty the size-bucketed free list.
static void clearHostFreeList() {
    for (map<int, set<void*> >::iterator bucket = free_list_host.begin();
         bucket != free_list_host.end(); ++bucket) {
        set<void*> &cached = bucket->second;
        for (set<void*>::iterator p = cached.begin(); p != cached.end(); ++p) {
            hipHostFree(*p);
        }
    }
    free_list_host.clear();
}
static int num_resurrections=0, num_morecore=0;
typedef hipError_t (*mallocfn_t)(void **ptr, size_t bytes);
// Allocate a fresh block of `bytes` via the supplied allocator (hipMalloc or
// hipHostMalloc).  If the first attempt yields NULL, both free-list caches are
// flushed to return memory to the runtime and the allocation is retried once.
// NOTE(review): if CUDA_SAFE aborts on a failed call, the NULL-retry path may
// be unreachable — confirm the macro's failure behavior.
static void *morecore(mallocfn_t fn, size_t bytes) {
void *ptr;
CUDA_SAFE(fn((void **)&ptr, bytes));
num_morecore += 1;
if(ptr==NULL) {
/*try one more time*/
clearHostFreeList();
clearGpuFreeList();
fn((void **)&ptr, bytes);
}
assert(ptr!=NULL); /*We hopefully have a pointer*/
return ptr;
}
/* Pop one cached pointer out of the size bucket `bytes` in free_map, record
 * it as live (with that bucket size) in liveset, and return it.  The bucket
 * for `bytes` must exist; an empty bucket is removed from the map. */
static inline void *resurrect_from_free_list(map<int,set<void *> > &free_map,
size_t bytes, map<void*,int>& liveset) {
    num_resurrections += 1;
    map<int, set<void *> >::iterator bucket = free_map.find(bytes);
    assert(bucket != free_map.end());
    /* assert(free_map.find(bytes)->second.size() > 0); */
    set<void *> &cached = bucket->second;
    void *recycled = *cached.begin();
    cached.erase(cached.begin());
    if (cached.empty())
        free_map.erase(bucket);
    liveset[recycled] = bytes;
    return recycled;
}
// Mark the pooled-allocator module as initialized.  Must be called before any
// getGpuMem/getHostMem/freeGpuMem/freeHostMem call — those assert on is_init.
void initmemmodule_() {
is_init=1;
}
// Pooled device allocator.  Reuse order: (1) an exact-size bucket from the
// free list; (2) if no exact bucket exists, the first cached block of size
// >= bytes (recorded as live at its larger bucket size, so it returns to the
// correct bucket on free); (3) a fresh hipMalloc via morecore().
// With NO_OPT defined, every call is a plain hipMalloc.
// NOTE(review): buckets are keyed by `int` while `bytes` is `size_t` — sizes
// >= 2^31 would truncate, and `it->first >= bytes` mixes signed/unsigned;
// confirm allocations stay well below 2 GB.
void *getGpuMem(size_t bytes) {
assert(is_init);
void *ptr;
#ifdef NO_OPT
CUDA_SAFE(hipMalloc((void **) &ptr, bytes));
#else
if(free_list_gpu.find(bytes)!=free_list_gpu.end()) {
// exact-size bucket exists; reuse a cached block if the bucket is non-empty
set<void*> &lst = free_list_gpu.find(bytes)->second;
if(lst.size()!=0) {
ptr = resurrect_from_free_list(free_list_gpu, bytes, live_ptrs_gpu);
return ptr;
}
}
else {
// no exact bucket: scan for any cached block large enough
for(map<int,set<void *> >::iterator it=free_list_gpu.begin();
it != free_list_gpu.end(); ++it) {
if(it->first >= bytes && it->second.size()>0) {
// note: recorded live size is it->first (the block's real bucket)
ptr = resurrect_from_free_list(free_list_gpu, it->first, live_ptrs_gpu);
return ptr;
}
}
}
// nothing reusable: allocate fresh memory from the runtime
ptr = morecore(hipMalloc, bytes);
/* cutilSafeCall(hipMalloc((void **) &ptr, bytes)); */
live_ptrs_gpu[ptr] = bytes;
#endif
return ptr;
}
// Pooled pinned-host allocator; mirrors getGpuMem but backed by
// hipHostMalloc.  Reuse order: exact-size bucket, then any cached block of
// size >= bytes, then a fresh allocation via morecore().
// NOTE(review): same int/size_t bucket-key truncation concern as getGpuMem.
void *getHostMem(size_t bytes) {
assert(is_init);
void *ptr;
#ifdef NO_OPT
CUDA_SAFE(hipHostMalloc((void **) &ptr, bytes));
#else
if(free_list_host.find(bytes)!=free_list_host.end()) {
// exact-size bucket exists; reuse a cached block if the bucket is non-empty
set<void*> &lst = free_list_host.find(bytes)->second;
if(lst.size()!=0) {
ptr = resurrect_from_free_list(free_list_host, bytes, live_ptrs_host);
/* ptr = *lst.begin(); */
/* lst.erase(lst.begin()); */
/* live_ptrs_host[ptr] = bytes; */
return ptr;
}
}
else {
// no exact bucket: scan for any cached block large enough
for(map<int,set<void *> >::iterator it=free_list_host.begin();
it != free_list_host.end(); ++it) {
if(it->first >= bytes && it->second.size()>0) {
ptr = resurrect_from_free_list(free_list_host, it->first, live_ptrs_host);
/* set<void*> &lst = it->second; */
/* ptr = *lst.begin(); */
/* lst.erase(lst.begin()); */
/* live_ptrs_gpu[ptr] = bytes; */
return ptr;
}
}
}
/* cutilSafeCall(hipHostMalloc((void **) &ptr, bytes)); */
// nothing reusable: allocate fresh pinned memory from the runtime
ptr = morecore(hipHostMalloc, bytes);
live_ptrs_host[ptr] = bytes;
#endif
return ptr;
}
// Return a live pinned-host pointer to the size-bucketed cache instead of
// freeing it (with NO_OPT defined it is released to the runtime directly).
// p must have been obtained from getHostMem.
void freeHostMem(void *p) {
    assert(is_init);
#ifdef NO_OPT
    hipHostFree(p);
#else
    map<void*, int>::iterator live = live_ptrs_host.find(p);
    assert(live != live_ptrs_host.end());
    int bytes = live->second;
    live_ptrs_host.erase(live);
    free_list_host[bytes].insert(p);
#endif
}
// Return a live device pointer to the size-bucketed cache instead of freeing
// it (with NO_OPT defined it is released to the runtime directly).
// p must have been obtained from getGpuMem.
void freeGpuMem(void *p) {
    assert(is_init);
#ifdef NO_OPT
    hipFree(p);
#else
    map<void*, int>::iterator live = live_ptrs_gpu.find(p);
    assert(live != live_ptrs_gpu.end());
    int bytes = live->second;
    live_ptrs_gpu.erase(live);
    free_list_gpu[bytes].insert(p);
#endif
}
// Shut the allocator module down: verify every pointer has been returned via
// freeGpuMem/freeHostMem, then release all cached blocks to the runtime.
void finalizememmodule_() {
assert(is_init);
is_init = 0;
/*there should be no live pointers*/
assert(live_ptrs_gpu.size()==0);
assert(live_ptrs_host.size()==0);
/*release all freed pointers*/
clearGpuFreeList();
clearHostFreeList();
//printf("num. resurrections=%d \t num. morecore=%d\n", num_resurrections, num_morecore);
}
}
| 487c65bdbd9168e5e7945e15a9a234c11ee7bddf.cu | //#include "common.h"
#include "header.h"
#include <map>
#include <set>
using namespace std;
/* #define NO_OPT */
extern "C" {
static int is_init=0;
static map<int,set<void*> > free_list_gpu, free_list_host;
static map<void *,int> live_ptrs_gpu, live_ptrs_host;
// Release every cached device allocation back to the CUDA runtime and empty
// the size-bucketed free list.
static void clearGpuFreeList() {
    for (map<int, set<void*> >::iterator bucket = free_list_gpu.begin();
         bucket != free_list_gpu.end(); ++bucket) {
        set<void*> &cached = bucket->second;
        for (set<void*>::iterator p = cached.begin(); p != cached.end(); ++p) {
            cudaFree(*p);
        }
    }
    free_list_gpu.clear();
}
// Release every cached pinned-host allocation back to the CUDA runtime and
// empty the size-bucketed free list.
static void clearHostFreeList() {
    for (map<int, set<void*> >::iterator bucket = free_list_host.begin();
         bucket != free_list_host.end(); ++bucket) {
        set<void*> &cached = bucket->second;
        for (set<void*>::iterator p = cached.begin(); p != cached.end(); ++p) {
            cudaFreeHost(*p);
        }
    }
    free_list_host.clear();
}
static int num_resurrections=0, num_morecore=0;
typedef cudaError (*mallocfn_t)(void **ptr, size_t bytes);
// Allocate a fresh block of `bytes` via the supplied allocator (cudaMalloc or
// cudaMallocHost).  If the first attempt yields NULL, both free-list caches
// are flushed to return memory to the runtime and the allocation is retried
// once.
// NOTE(review): if CUDA_SAFE aborts on a failed call, the NULL-retry path may
// be unreachable — confirm the macro's failure behavior.
static void *morecore(mallocfn_t fn, size_t bytes) {
void *ptr;
CUDA_SAFE(fn((void **)&ptr, bytes));
num_morecore += 1;
if(ptr==NULL) {
/*try one more time*/
clearHostFreeList();
clearGpuFreeList();
fn((void **)&ptr, bytes);
}
assert(ptr!=NULL); /*We hopefully have a pointer*/
return ptr;
}
/* Pop one cached pointer out of the size bucket `bytes` in free_map, record
 * it as live (with that bucket size) in liveset, and return it.  The bucket
 * for `bytes` must exist; an empty bucket is removed from the map. */
static inline void *resurrect_from_free_list(map<int,set<void *> > &free_map,
size_t bytes, map<void*,int>& liveset) {
    num_resurrections += 1;
    map<int, set<void *> >::iterator bucket = free_map.find(bytes);
    assert(bucket != free_map.end());
    /* assert(free_map.find(bytes)->second.size() > 0); */
    set<void *> &cached = bucket->second;
    void *recycled = *cached.begin();
    cached.erase(cached.begin());
    if (cached.empty())
        free_map.erase(bucket);
    liveset[recycled] = bytes;
    return recycled;
}
// Mark the pooled-allocator module as initialized.  Must be called before any
// getGpuMem/getHostMem/freeGpuMem/freeHostMem call — those assert on is_init.
void initmemmodule_() {
is_init=1;
}
// Pooled device allocator.  Reuse order: (1) an exact-size bucket from the
// free list; (2) if no exact bucket exists, the first cached block of size
// >= bytes (recorded as live at its larger bucket size, so it returns to the
// correct bucket on free); (3) a fresh cudaMalloc via morecore().
// With NO_OPT defined, every call is a plain cudaMalloc.
// NOTE(review): buckets are keyed by `int` while `bytes` is `size_t` — sizes
// >= 2^31 would truncate, and `it->first >= bytes` mixes signed/unsigned;
// confirm allocations stay well below 2 GB.
void *getGpuMem(size_t bytes) {
assert(is_init);
void *ptr;
#ifdef NO_OPT
CUDA_SAFE(cudaMalloc((void **) &ptr, bytes));
#else
if(free_list_gpu.find(bytes)!=free_list_gpu.end()) {
// exact-size bucket exists; reuse a cached block if the bucket is non-empty
set<void*> &lst = free_list_gpu.find(bytes)->second;
if(lst.size()!=0) {
ptr = resurrect_from_free_list(free_list_gpu, bytes, live_ptrs_gpu);
return ptr;
}
}
else {
// no exact bucket: scan for any cached block large enough
for(map<int,set<void *> >::iterator it=free_list_gpu.begin();
it != free_list_gpu.end(); ++it) {
if(it->first >= bytes && it->second.size()>0) {
// note: recorded live size is it->first (the block's real bucket)
ptr = resurrect_from_free_list(free_list_gpu, it->first, live_ptrs_gpu);
return ptr;
}
}
}
// nothing reusable: allocate fresh memory from the runtime
ptr = morecore(cudaMalloc, bytes);
/* cutilSafeCall(cudaMalloc((void **) &ptr, bytes)); */
live_ptrs_gpu[ptr] = bytes;
#endif
return ptr;
}
// Pooled pinned-host allocator; mirrors getGpuMem but backed by
// cudaMallocHost.  Reuse order: exact-size bucket, then any cached block of
// size >= bytes, then a fresh allocation via morecore().
// NOTE(review): same int/size_t bucket-key truncation concern as getGpuMem.
void *getHostMem(size_t bytes) {
assert(is_init);
void *ptr;
#ifdef NO_OPT
CUDA_SAFE(cudaMallocHost((void **) &ptr, bytes));
#else
if(free_list_host.find(bytes)!=free_list_host.end()) {
// exact-size bucket exists; reuse a cached block if the bucket is non-empty
set<void*> &lst = free_list_host.find(bytes)->second;
if(lst.size()!=0) {
ptr = resurrect_from_free_list(free_list_host, bytes, live_ptrs_host);
/* ptr = *lst.begin(); */
/* lst.erase(lst.begin()); */
/* live_ptrs_host[ptr] = bytes; */
return ptr;
}
}
else {
// no exact bucket: scan for any cached block large enough
for(map<int,set<void *> >::iterator it=free_list_host.begin();
it != free_list_host.end(); ++it) {
if(it->first >= bytes && it->second.size()>0) {
ptr = resurrect_from_free_list(free_list_host, it->first, live_ptrs_host);
/* set<void*> &lst = it->second; */
/* ptr = *lst.begin(); */
/* lst.erase(lst.begin()); */
/* live_ptrs_gpu[ptr] = bytes; */
return ptr;
}
}
}
/* cutilSafeCall(cudaMallocHost((void **) &ptr, bytes)); */
// nothing reusable: allocate fresh pinned memory from the runtime
ptr = morecore(cudaMallocHost, bytes);
live_ptrs_host[ptr] = bytes;
#endif
return ptr;
}
// Return a live pinned-host pointer to the size-bucketed cache instead of
// freeing it (with NO_OPT defined it is released to the runtime directly).
// p must have been obtained from getHostMem.
void freeHostMem(void *p) {
    assert(is_init);
#ifdef NO_OPT
    cudaFreeHost(p);
#else
    map<void*, int>::iterator live = live_ptrs_host.find(p);
    assert(live != live_ptrs_host.end());
    int bytes = live->second;
    live_ptrs_host.erase(live);
    free_list_host[bytes].insert(p);
#endif
}
// Return a live device pointer to the size-bucketed cache instead of freeing
// it (with NO_OPT defined it is released to the runtime directly).
// p must have been obtained from getGpuMem.
void freeGpuMem(void *p) {
    assert(is_init);
#ifdef NO_OPT
    cudaFree(p);
#else
    map<void*, int>::iterator live = live_ptrs_gpu.find(p);
    assert(live != live_ptrs_gpu.end());
    int bytes = live->second;
    live_ptrs_gpu.erase(live);
    free_list_gpu[bytes].insert(p);
#endif
}
// Shut the allocator module down: verify every pointer has been returned via
// freeGpuMem/freeHostMem, then release all cached blocks to the runtime.
void finalizememmodule_() {
assert(is_init);
is_init = 0;
/*there should be no live pointers*/
assert(live_ptrs_gpu.size()==0);
assert(live_ptrs_host.size()==0);
/*release all freed pointers*/
clearGpuFreeList();
clearHostFreeList();
//printf("num. resurrections=%d \t num. morecore=%d\n", num_resurrections, num_morecore);
}
|
b302a80d269482a0b643969b0f9266eb3eaeefa4.hip | // !!! This is a file automatically generated by hipify!!!
// ---------------------------------------------------------
// Copyright (c) 2016, Andy Zeng
//
// This file is part of the APC Vision Toolbox and is available
// under the terms of the Simplified BSD License provided in
// LICENSE. Please retain this notice and LICENSE if you use
// this file (or any portion of it) in your project.
// ---------------------------------------------------------
#include "depth_utils.h"
#include "ros/ros.h"
#include "marvin_convnet/DetectObjects.h"
//#include "realsense_camera/StreamSensor.h"
#include <opencv2/opencv.hpp>
#include <cv_bridge/cv_bridge.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv2/highgui/highgui.hpp>
// Marvin
#define DATATYPE 0
#include "marvin.hpp"
std::string shelf_net_arch_filename = "/home/nvidia/ctsphub-workshop-2018/04-perception/02-marvin/catkin_ws/src/marvin_convnet/models/competition/net.json";
std::string tote_net_arch_filename = "/home/nvidia/ctsphub-workshop-2018/04-perception/02-marvin/catkin_ws/src/marvin_convnet/models/competition/net.json";
std::string shelf_net_weights_filename = "/home/nvidia/ctsphub-workshop-2018/04-perception/02-marvin/catkin_ws/src/marvin_convnet/models/competition/weights_shelf.marvin";
std::string tote_net_weights_filename = "/home/nvidia/ctsphub-workshop-2018/04-perception/02-marvin/catkin_ws/src/marvin_convnet/models/competition/weights_tote.marvin";
// Service modes and names
//std::string service_name;
std::string rgb_topic_name = "/camera/rgb/image_raw";
// Directory to read/write all RGB-D files and response maps
//std::string read_directory;
// Global buffers for sensor data retrieval
int frame_width = 640;
int frame_height = 480;
uint8_t * color_buffer = new uint8_t[frame_width * frame_height * 3];
// Load Marvin FCN network architectures
marvin::Net tote_net(tote_net_arch_filename);
// Marvin responses
StorageT* color_data_CPU = NULL;
StorageT* prob_CPU_StorageT = NULL;
ComputeT* prob_CPU_ComputeT = NULL;
ros::ServiceClient client_sensor;
const int num_apc_objects = 39;
std::string shelf_bin_ids = "ABCDEFGHIJKL";
// ROS image callback: copies one BGR frame to the GPU, runs the Marvin FCN
// forward pass, thresholds the per-pixel probability of the selected object
// ("dove_beauty_bar") at 0.01, and publishes the resulting binary RGB mask
// on /mask_prediction.
// NOTE(review): a NodeHandle and Publisher are constructed on every frame;
// early messages may be dropped while the publisher registers — consider
// hoisting them to file scope.
// NOTE(review): `color_buffer = color_frame.data;` discards the buffer
// allocated with new[] at file scope (leak) and leaves color_buffer pointing
// into cv::Mat storage that is only valid within this callback.
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
ros::NodeHandle nh_p;
ros::Publisher pub = nh_p.advertise<sensor_msgs::Image>("/mask_prediction", 1);
ROS_INFO("Recieved IMAGE topic.");
cv::Mat color_frame = cv_bridge::toCvShare(msg, "bgr8")->image;
color_buffer = color_frame.data;
// Color: BGR format, mean subtracted
// Repack HWC interleaved BGR into CHW planes, subtracting per-channel means.
for (int r = 0; r < frame_height; ++r)
for (int c = 0; c < frame_width; ++c) {
color_data_CPU[0 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[0 + 3 * (c + frame_width * r)]) - ComputeT(102.9801f)); // B
color_data_CPU[1 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[1 + 3 * (c + frame_width * r)]) - ComputeT(115.9465f)); // G
color_data_CPU[2 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[2 + 3 * (c + frame_width * r)]) - ComputeT(122.7717f)); // R
}
// Run forward pass through marvin FCN
//ROS_INFO("Forward Marvin to get segmentation results.");
marvin::Response * rDataRGB;
marvin::Response * rProb;
rDataRGB = tote_net.getResponse("data_RGB");
rProb = tote_net.getResponse("prob");
// upload the preprocessed frame, run inference, download the probability maps
hipMemcpy(rDataRGB->dataGPU, color_data_CPU, rDataRGB->numBytes(), hipMemcpyHostToDevice);
tote_net.forward();
hipMemcpy(prob_CPU_StorageT, rProb->dataGPU, rProb->numBytes(), hipMemcpyDeviceToHost);
for (int i = 0; i < frame_height * frame_width * (num_apc_objects + 1); ++i)
prob_CPU_ComputeT[i] = CPUStorage2ComputeT(prob_CPU_StorageT[i]);
// Get full object list
std::vector<std::string> all_object_names = {"background", "barkely_hide_bones", "cherokee_easy_tee_shirt", "clorox_utility_brush", "cloud_b_plush_bear", "command_hooks", "cool_shot_glue_sticks", "crayola_24_ct", "creativity_chenille_stems", "dasani_water_bottle",
"dove_beauty_bar", "dr_browns_bottle_brush", "easter_turtle_sippy_cup", "elmers_washable_no_run_school_glue", "expo_dry_erase_board_eraser", "fiskars_scissors_red", "fitness_gear_3lb_dumbbell", "folgers_classic_roast_coffee", "hanes_tube_socks", "i_am_a_bunny_book",
"jane_eyre_dvd", "kleenex_paper_towels", "kleenex_tissue_box", "kyjen_squeakin_eggs_plush_puppies", "laugh_out_loud_joke_book", "oral_b_toothbrush_green", "oral_b_toothbrush_red", "peva_shower_curtain_liner", "platinum_pets_dog_bowl", "rawlings_baseball",
"rolodex_jumbo_pencil_cup", "safety_first_outlet_plugs", "scotch_bubble_mailer", "scotch_duct_tape", "soft_white_lightbulb", "staples_index_cards", "ticonderoga_12_pencils", "up_glucose_bottle", "womens_knit_gloves", "woods_extension_cord"};
std::vector<std::string> selected_object_names = {"dove_beauty_bar"};//{"kleenex_paper_towels", "kleenex_tissue_box","crayola_24_ct"};
unsigned short dove_color[3] = {255,255,255};
//unsigned short viva_color[3] = {0,0,128}; // viva = 0 0 128
//unsigned short kleenex_color[3] = {128,128,0}; // kleenex = 128 128 0
//unsigned short crayola_color[3] = {0,128,0}; // crayloa = 0 128 0
// Remove duplicates in selected object list
// Loop through each object in selected list
for (int selected_idx = 0; selected_idx < 1; selected_idx++) { //selected_object_names.size()
std::string curr_object_name = selected_object_names[selected_idx];
int curr_object_idx = std::distance(all_object_names.begin(), find(all_object_names.begin(), all_object_names.end(), curr_object_name));
// slice out this object's H*W probability plane from the class-major output
std::vector<ComputeT> predMap_object(prob_CPU_ComputeT + curr_object_idx * frame_height * frame_width, prob_CPU_ComputeT + (curr_object_idx + 1) * frame_height * frame_width);
/*
curr_object_name = selected_object_names[selected_idx+1];
curr_object_idx = std::distance(all_object_names.begin(), find(all_object_names.begin(), all_object_names.end(), curr_object_name));
std::cout << curr_object_idx << " , "<< curr_object_name << std::endl;
std::vector<ComputeT> predMap_object_1(prob_CPU_ComputeT + curr_object_idx * frame_height * frame_width, prob_CPU_ComputeT + (curr_object_idx + 1) * frame_height * frame_width);
*/
/*
curr_object_name = selected_object_names[selected_idx+2];
curr_object_idx = std::distance(all_object_names.begin(), find(all_object_names.begin(), all_object_names.end(), curr_object_name));
std::cout << curr_object_idx << " , "<< curr_object_name << std::endl;
std::vector<ComputeT> predMap_object_2(prob_CPU_ComputeT + curr_object_idx * frame_height * frame_width, prob_CPU_ComputeT + (curr_object_idx + 1) * frame_height * frame_width);
*/
cv::Mat result_mat(480, 640, CV_8UC3);
// binarize: probability > 0.01 becomes the object color, else black
for (size_t y = 0; y < frame_height; y++)
for (size_t x = 0; x < frame_width; x++) {
ComputeT p_0 = (predMap_object[y * frame_width + x]);
//ComputeT p_1 = (predMap_object_1[y * frame_width + x]);
//ComputeT p_2 = (predMap_object_2[y * frame_width + x]);
int max_class = 0; // NOTE(review): unused
ComputeT max_value = p_0;
ComputeT max_value_R = 0;
ComputeT max_value_G = 0;
ComputeT max_value_B = 0;
if (max_value <=0.01)
max_value = 0;
if (max_value >0.01)
max_value = 1;
max_value_R = max_value*dove_color[0];
max_value_G = max_value*dove_color[1];
max_value_B = max_value*dove_color[2];
result_mat.at<cv::Vec3b>(y, x)[0] = (unsigned short)max_value_R;
result_mat.at<cv::Vec3b>(y, x)[1] = (unsigned short)max_value_G;
result_mat.at<cv::Vec3b>(y, x)[2] = (unsigned short)max_value_B;
}
// wrap the mask in a sensor_msgs::Image and publish it
cv_bridge::CvImage cv_image;
cv::Mat result_mat_final(480, 640, CV_8UC3);
result_mat_final = result_mat;
cv_image.image = result_mat_final;
cv_image.encoding = "rgb8";
sensor_msgs::Image ros_image;
cv_image.toImageMsg(ros_image);
pub.publish(ros_image);
}
}
// Node entry point: initialize ROS, load the Marvin network and its weights,
// allocate the host-side staging buffers, subscribe to the RGB topic, and
// spin.  Buffers live for the whole process; they are never freed.
// NOTE(review): nh_p is declared but unused in this function.
int main(int argc, char **argv) {
// Setup ROS
ros::init(argc, argv, "marvin_convnet", ros::init_options::AnonymousName);
ros::NodeHandle nh;
ros::NodeHandle nh_p;
// Setup Marvin
ROS_INFO("Loading Marvin.");
tote_net.Malloc(marvin::Testing);
tote_net.loadWeights(tote_net_weights_filename);
// input plane (3 channels) and output probability maps (40 classes)
color_data_CPU = new StorageT[frame_width * frame_height * 3];
prob_CPU_StorageT = new StorageT[frame_width * frame_height * (num_apc_objects + 1)];
prob_CPU_ComputeT = new ComputeT[frame_height * frame_width * (num_apc_objects + 1)];
ROS_INFO("Marvin Ready.");
image_transport::ImageTransport it(nh);
image_transport::Subscriber sub = it.subscribe(rgb_topic_name, 1, imageCallback);
ROS_INFO("Image topic ready to recieve.");
ros::spin();
return 0;
}
| b302a80d269482a0b643969b0f9266eb3eaeefa4.cu | // ---------------------------------------------------------
// Copyright (c) 2016, Andy Zeng
//
// This file is part of the APC Vision Toolbox and is available
// under the terms of the Simplified BSD License provided in
// LICENSE. Please retain this notice and LICENSE if you use
// this file (or any portion of it) in your project.
// ---------------------------------------------------------
#include "depth_utils.h"
#include "ros/ros.h"
#include "marvin_convnet/DetectObjects.h"
//#include "realsense_camera/StreamSensor.h"
#include <opencv2/opencv.hpp>
#include <cv_bridge/cv_bridge.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv2/highgui/highgui.hpp>
// Marvin
#define DATATYPE 0
#include "marvin.hpp"
std::string shelf_net_arch_filename = "/home/nvidia/ctsphub-workshop-2018/04-perception/02-marvin/catkin_ws/src/marvin_convnet/models/competition/net.json";
std::string tote_net_arch_filename = "/home/nvidia/ctsphub-workshop-2018/04-perception/02-marvin/catkin_ws/src/marvin_convnet/models/competition/net.json";
std::string shelf_net_weights_filename = "/home/nvidia/ctsphub-workshop-2018/04-perception/02-marvin/catkin_ws/src/marvin_convnet/models/competition/weights_shelf.marvin";
std::string tote_net_weights_filename = "/home/nvidia/ctsphub-workshop-2018/04-perception/02-marvin/catkin_ws/src/marvin_convnet/models/competition/weights_tote.marvin";
// Service modes and names
//std::string service_name;
std::string rgb_topic_name = "/camera/rgb/image_raw";
// Directory to read/write all RGB-D files and response maps
//std::string read_directory;
// Global buffers for sensor data retrieval
int frame_width = 640;
int frame_height = 480;
uint8_t * color_buffer = new uint8_t[frame_width * frame_height * 3];
// Load Marvin FCN network architectures
marvin::Net tote_net(tote_net_arch_filename);
// Marvin responses
StorageT* color_data_CPU = NULL;
StorageT* prob_CPU_StorageT = NULL;
ComputeT* prob_CPU_ComputeT = NULL;
ros::ServiceClient client_sensor;
const int num_apc_objects = 39;
std::string shelf_bin_ids = "ABCDEFGHIJKL";
// ROS image callback: copies one BGR frame to the GPU, runs the Marvin FCN
// forward pass, thresholds the per-pixel probability of the selected object
// ("dove_beauty_bar") at 0.01, and publishes the resulting binary RGB mask
// on /mask_prediction.
// NOTE(review): a NodeHandle and Publisher are constructed on every frame;
// early messages may be dropped while the publisher registers — consider
// hoisting them to file scope.
// NOTE(review): `color_buffer = color_frame.data;` discards the buffer
// allocated with new[] at file scope (leak) and leaves color_buffer pointing
// into cv::Mat storage that is only valid within this callback.
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
ros::NodeHandle nh_p;
ros::Publisher pub = nh_p.advertise<sensor_msgs::Image>("/mask_prediction", 1);
ROS_INFO("Recieved IMAGE topic.");
cv::Mat color_frame = cv_bridge::toCvShare(msg, "bgr8")->image;
color_buffer = color_frame.data;
// Color: BGR format, mean subtracted
// Repack HWC interleaved BGR into CHW planes, subtracting per-channel means.
for (int r = 0; r < frame_height; ++r)
for (int c = 0; c < frame_width; ++c) {
color_data_CPU[0 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[0 + 3 * (c + frame_width * r)]) - ComputeT(102.9801f)); // B
color_data_CPU[1 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[1 + 3 * (c + frame_width * r)]) - ComputeT(115.9465f)); // G
color_data_CPU[2 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[2 + 3 * (c + frame_width * r)]) - ComputeT(122.7717f)); // R
}
// Run forward pass through marvin FCN
//ROS_INFO("Forward Marvin to get segmentation results.");
marvin::Response * rDataRGB;
marvin::Response * rProb;
rDataRGB = tote_net.getResponse("data_RGB");
rProb = tote_net.getResponse("prob");
// upload the preprocessed frame, run inference, download the probability maps
cudaMemcpy(rDataRGB->dataGPU, color_data_CPU, rDataRGB->numBytes(), cudaMemcpyHostToDevice);
tote_net.forward();
cudaMemcpy(prob_CPU_StorageT, rProb->dataGPU, rProb->numBytes(), cudaMemcpyDeviceToHost);
for (int i = 0; i < frame_height * frame_width * (num_apc_objects + 1); ++i)
prob_CPU_ComputeT[i] = CPUStorage2ComputeT(prob_CPU_StorageT[i]);
// Get full object list
std::vector<std::string> all_object_names = {"background", "barkely_hide_bones", "cherokee_easy_tee_shirt", "clorox_utility_brush", "cloud_b_plush_bear", "command_hooks", "cool_shot_glue_sticks", "crayola_24_ct", "creativity_chenille_stems", "dasani_water_bottle",
"dove_beauty_bar", "dr_browns_bottle_brush", "easter_turtle_sippy_cup", "elmers_washable_no_run_school_glue", "expo_dry_erase_board_eraser", "fiskars_scissors_red", "fitness_gear_3lb_dumbbell", "folgers_classic_roast_coffee", "hanes_tube_socks", "i_am_a_bunny_book",
"jane_eyre_dvd", "kleenex_paper_towels", "kleenex_tissue_box", "kyjen_squeakin_eggs_plush_puppies", "laugh_out_loud_joke_book", "oral_b_toothbrush_green", "oral_b_toothbrush_red", "peva_shower_curtain_liner", "platinum_pets_dog_bowl", "rawlings_baseball",
"rolodex_jumbo_pencil_cup", "safety_first_outlet_plugs", "scotch_bubble_mailer", "scotch_duct_tape", "soft_white_lightbulb", "staples_index_cards", "ticonderoga_12_pencils", "up_glucose_bottle", "womens_knit_gloves", "woods_extension_cord"};
std::vector<std::string> selected_object_names = {"dove_beauty_bar"};//{"kleenex_paper_towels", "kleenex_tissue_box","crayola_24_ct"};
unsigned short dove_color[3] = {255,255,255};
//unsigned short viva_color[3] = {0,0,128}; // viva = 0 0 128
//unsigned short kleenex_color[3] = {128,128,0}; // kleenex = 128 128 0
//unsigned short crayola_color[3] = {0,128,0}; // crayloa = 0 128 0
// Remove duplicates in selected object list
// Loop through each object in selected list
for (int selected_idx = 0; selected_idx < 1; selected_idx++) { //selected_object_names.size()
std::string curr_object_name = selected_object_names[selected_idx];
int curr_object_idx = std::distance(all_object_names.begin(), find(all_object_names.begin(), all_object_names.end(), curr_object_name));
// slice out this object's H*W probability plane from the class-major output
std::vector<ComputeT> predMap_object(prob_CPU_ComputeT + curr_object_idx * frame_height * frame_width, prob_CPU_ComputeT + (curr_object_idx + 1) * frame_height * frame_width);
/*
curr_object_name = selected_object_names[selected_idx+1];
curr_object_idx = std::distance(all_object_names.begin(), find(all_object_names.begin(), all_object_names.end(), curr_object_name));
std::cout << curr_object_idx << " , "<< curr_object_name << std::endl;
std::vector<ComputeT> predMap_object_1(prob_CPU_ComputeT + curr_object_idx * frame_height * frame_width, prob_CPU_ComputeT + (curr_object_idx + 1) * frame_height * frame_width);
*/
/*
curr_object_name = selected_object_names[selected_idx+2];
curr_object_idx = std::distance(all_object_names.begin(), find(all_object_names.begin(), all_object_names.end(), curr_object_name));
std::cout << curr_object_idx << " , "<< curr_object_name << std::endl;
std::vector<ComputeT> predMap_object_2(prob_CPU_ComputeT + curr_object_idx * frame_height * frame_width, prob_CPU_ComputeT + (curr_object_idx + 1) * frame_height * frame_width);
*/
cv::Mat result_mat(480, 640, CV_8UC3);
// binarize: probability > 0.01 becomes the object color, else black
for (size_t y = 0; y < frame_height; y++)
for (size_t x = 0; x < frame_width; x++) {
ComputeT p_0 = (predMap_object[y * frame_width + x]);
//ComputeT p_1 = (predMap_object_1[y * frame_width + x]);
//ComputeT p_2 = (predMap_object_2[y * frame_width + x]);
int max_class = 0; // NOTE(review): unused
ComputeT max_value = p_0;
ComputeT max_value_R = 0;
ComputeT max_value_G = 0;
ComputeT max_value_B = 0;
if (max_value <=0.01)
max_value = 0;
if (max_value >0.01)
max_value = 1;
max_value_R = max_value*dove_color[0];
max_value_G = max_value*dove_color[1];
max_value_B = max_value*dove_color[2];
result_mat.at<cv::Vec3b>(y, x)[0] = (unsigned short)max_value_R;
result_mat.at<cv::Vec3b>(y, x)[1] = (unsigned short)max_value_G;
result_mat.at<cv::Vec3b>(y, x)[2] = (unsigned short)max_value_B;
}
// wrap the mask in a sensor_msgs::Image and publish it
cv_bridge::CvImage cv_image;
cv::Mat result_mat_final(480, 640, CV_8UC3);
result_mat_final = result_mat;
cv_image.image = result_mat_final;
cv_image.encoding = "rgb8";
sensor_msgs::Image ros_image;
cv_image.toImageMsg(ros_image);
pub.publish(ros_image);
}
}
// Node entry point: initialize ROS, load the Marvin network and its weights,
// allocate the host-side staging buffers, subscribe to the RGB topic, and
// spin.  Buffers live for the whole process; they are never freed.
// NOTE(review): nh_p is declared but unused in this function.
int main(int argc, char **argv) {
// Setup ROS
ros::init(argc, argv, "marvin_convnet", ros::init_options::AnonymousName);
ros::NodeHandle nh;
ros::NodeHandle nh_p;
// Setup Marvin
ROS_INFO("Loading Marvin.");
tote_net.Malloc(marvin::Testing);
tote_net.loadWeights(tote_net_weights_filename);
// input plane (3 channels) and output probability maps (40 classes)
color_data_CPU = new StorageT[frame_width * frame_height * 3];
prob_CPU_StorageT = new StorageT[frame_width * frame_height * (num_apc_objects + 1)];
prob_CPU_ComputeT = new ComputeT[frame_height * frame_width * (num_apc_objects + 1)];
ROS_INFO("Marvin Ready.");
image_transport::ImageTransport it(nh);
image_transport::Subscriber sub = it.subscribe(rgb_topic_name, 1, imageCallback);
ROS_INFO("Image topic ready to recieve.");
ros::spin();
return 0;
}
|
b83c273c883168dbfc4aaf1b1c4a7f17afd3580d.hip | // !!! This is a file automatically generated by hipify!!!
//------------------------------------------------------------------------------
// GB_cuda_get_device_properties.cu: get the properties of a GPU
//------------------------------------------------------------------------------
// SPDX-License-Identifier: Apache-2.0
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GB_cuda.h"
//------------------------------------------------------------------------------
// GB_cuda_get_device: get the current GPU
//------------------------------------------------------------------------------
// Get the index of the GPU currently selected by the HIP runtime.
// Returns true if OK, false on failure.
// Fix: the previous `&device == NULL` guard compared the address of a C++
// reference to NULL — always false in well-formed code (compilers warn with
// -Waddress and may delete the branch).  The dead check has been removed;
// a reference parameter cannot be null.
bool GB_cuda_get_device (int &device)
{
    CHECK_CUDA_SIMPLE (hipGetDevice (&device)) ;
    return (true) ;
}
//------------------------------------------------------------------------------
// GB_cuda_set_device: set the current GPU
//------------------------------------------------------------------------------
// Make `device` the current GPU for the calling host thread.
// Returns true if OK, false if `device` is negative.
bool GB_cuda_set_device (int device)
{
if (device < 0)
{
// invalid inputs
return (false) ;
}
CHECK_CUDA_SIMPLE (hipSetDevice (device)) ;
return (true) ;
}
//------------------------------------------------------------------------------
// GB_cuda_get_device_properties: determine all properties of a single GPU
//------------------------------------------------------------------------------
bool GB_cuda_get_device_properties  // true if OK, false if failure
(
    int device,
    GB_cuda_device *prop
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    if (prop == NULL || device < 0)
    {
        // invalid inputs
        return (false) ;
    }

    // clear the GPU settings
    memset (prop, 0, sizeof (GB_cuda_device)) ;

    // remember the caller's current device so it can be restored on exit
    int old_device ;
    CHECK_CUDA_SIMPLE ( hipGetDevice( &old_device ) ) ;

    //--------------------------------------------------------------------------
    // get the properties
    //--------------------------------------------------------------------------

    int num_sms, compute_capability_major, compute_capability_minor ;
    size_t memfree, memtotal ;
    CHECK_CUDA_SIMPLE( hipDeviceGetAttribute (&num_sms,
                                  hipDeviceAttributeMultiprocessorCount,
                                  device) ) ;
    CHECK_CUDA_SIMPLE( hipDeviceGetAttribute (&compute_capability_major,
                                  hipDeviceAttributeComputeCapabilityMajor,
                                  device) ) ;
    // bug fix: the minor revision was previously queried with the
    // ...ComputeCapabilityMajor attribute, so the minor value always
    // duplicated the major value; use the Minor attribute here
    CHECK_CUDA_SIMPLE( hipDeviceGetAttribute (&compute_capability_minor,
                                  hipDeviceAttributeComputeCapabilityMinor,
                                  device) ) ;

    // switch to the queried device to read its free/total memory, then restore
    CHECK_CUDA_SIMPLE ( hipSetDevice( device ) ) ;
    CHECK_CUDA_SIMPLE ( hipMemGetInfo( & memfree, &memtotal) ) ;
    CHECK_CUDA_SIMPLE ( hipSetDevice( old_device ) ) ;

    prop->total_global_memory = memtotal ;
    prop->number_of_sms = num_sms ;
    prop->compute_capability_major = compute_capability_major ;
    prop->compute_capability_minor = compute_capability_minor ;

    printf ("Device: %d: memory: %ld SMs: %d compute: %d.%d\n",
        device, prop->total_global_memory, prop->number_of_sms,
        prop->compute_capability_major, prop->compute_capability_minor) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (true) ;
}
| b83c273c883168dbfc4aaf1b1c4a7f17afd3580d.cu | //------------------------------------------------------------------------------
// GB_cuda_get_device_properties.cu: get the properties of a GPU
//------------------------------------------------------------------------------
// SPDX-License-Identifier: Apache-2.0
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GB_cuda.h"
//------------------------------------------------------------------------------
// GB_cuda_get_device: get the current GPU
//------------------------------------------------------------------------------
// GB_cuda_get_device: query the runtime for the currently active GPU.
// On success `device` holds the current device ordinal and true is returned;
// a CUDA failure is handled by CHECK_CUDA_SIMPLE (presumably it returns
// false -- macro defined elsewhere in GB_cuda.h).
bool GB_cuda_get_device (int &device)
{
    // NOTE: the original guard `if (&device == NULL)` was removed: the
    // address of a C++ reference can never be NULL, so the branch was
    // unreachable dead code.
    CHECK_CUDA_SIMPLE (cudaGetDevice (&device)) ;
    return (true) ;
}
//------------------------------------------------------------------------------
// GB_cuda_set_device: set the current GPU
//------------------------------------------------------------------------------
// GB_cuda_set_device: make the given GPU the current device.
// Rejects negative ordinals; otherwise forwards the request to
// cudaSetDevice via CHECK_CUDA_SIMPLE and reports success.
bool GB_cuda_set_device (int device)
{
    if (device < 0) return (false) ;    // invalid inputs
    CHECK_CUDA_SIMPLE (cudaSetDevice (device)) ;
    return (true) ;
}
//------------------------------------------------------------------------------
// GB_cuda_get_device_properties: determine all properties of a single GPU
//------------------------------------------------------------------------------
// Fills *prop with the SM count, compute capability (major.minor), and total
// global memory of GPU `device`.  The current device is switched to `device`
// just long enough to query its memory info, then restored.  Returns false
// for invalid inputs; CUDA API failures are handled by CHECK_CUDA_SIMPLE.
bool GB_cuda_get_device_properties     // true if OK, false if failure
(
    int device,                 // GPU ordinal to query
    GB_cuda_device *prop        // output: properties of that GPU
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    if (prop == NULL || device < 0)
    {
        // invalid inputs
        return (false) ;
    }

    // clear the GPU settings
    memset (prop, 0, sizeof (GB_cuda_device)) ;

    int old_device ;
    CHECK_CUDA_SIMPLE ( cudaGetDevice( &old_device ) ) ;

    //--------------------------------------------------------------------------
    // get the properties
    //--------------------------------------------------------------------------

    int num_sms, compute_capability_major, compute_capability_minor ;
    size_t memfree, memtotal ;
    CHECK_CUDA_SIMPLE( cudaDeviceGetAttribute (&num_sms,
                                   cudaDevAttrMultiProcessorCount,
                                   device) ) ;
    CHECK_CUDA_SIMPLE( cudaDeviceGetAttribute (&compute_capability_major,
                                   cudaDevAttrComputeCapabilityMajor,
                                   device) ) ;
    // BUG FIX: the minor capability was previously queried with the *major*
    // attribute, so compute_capability_minor always repeated the major value.
    CHECK_CUDA_SIMPLE( cudaDeviceGetAttribute (&compute_capability_minor,
                                   cudaDevAttrComputeCapabilityMinor,
                                   device) ) ;
    CHECK_CUDA_SIMPLE ( cudaSetDevice( device ) ) ;
    CHECK_CUDA_SIMPLE ( cudaMemGetInfo( & memfree, &memtotal) ) ;
    CHECK_CUDA_SIMPLE ( cudaSetDevice( old_device ) ) ;

    prop->total_global_memory = memtotal ;
    prop->number_of_sms = num_sms ;
    prop->compute_capability_major = compute_capability_major ;
    prop->compute_capability_minor = compute_capability_minor ;

    // cast explicitly: total_global_memory originates from a size_t, and %ld
    // expects a long
    printf ("Device: %d: memory: %ld SMs: %d compute: %d.%d\n",
        device, (long) prop->total_global_memory, prop->number_of_sms,
        prop->compute_capability_major, prop->compute_capability_minor) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (true) ;
}
|
818944e35a3cb1a7ee5887284e6448e4206aad79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorTransformations.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
// Fast-path flip kernel for the case where the flipped dimension is either
// the first or the last one, and the flipped coordinate can be recovered
// from strides[0] alone (linear_index / strides[0] gives the dim-0
// coordinate, linear_index % strides[0] the remainder).  Presumably the
// caller only dispatches here for contiguous tensors -- caller is outside
// this view, confirm before reusing.
// Grid-stride loop, so any launch configuration covers all N elements.
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void kernel_pointwise_flip_apply2(
const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
// Destination offset = source offset with the flipped coordinate mirrored.
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
// Generic flip kernel: one thread per output element.  The contiguous
// linear index is decomposed into per-dimension coordinates using
// strides_contiguous; each coordinate whose dimension appears in flip_dims
// is mirrored (shape[i] - 1 - c), and the mirrored coordinates are
// re-encoded with the input's actual strides to locate the source element.
// flip_dims, strides, strides_contiguous and shape are device-accessible
// int64 arrays (indexed up to total_dims / flip_dims_size).
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void flip_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t* flip_dims,
int64_t flip_dims_size,
int64_t* strides,
int64_t* strides_contiguous,
int64_t* shape,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// cur_indices: remaining linear index being peeled dimension by dimension;
// dst_offset: accumulated source offset in input-stride units.
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// One thread per output element.  For output index i, finds the input
// element that lands at position i after rolling the dimension whose
// stride/extent are passed in forward by `start` slots, and copies it.
// `start` is expected in [0, size) (normalized by the caller); roll_dim and
// total_dims are not read in the body and are kept for interface parity.
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void roll_cuda_kernel(
    scalar_t* in_tensor,
    scalar_t* out_tensor,
    int64_t N,
    int64_t roll_dim,
    int64_t start,
    int64_t size,
    int64_t stride,
    int64_t total_dims) {
  const int64_t out_idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (out_idx >= N) {
    return;
  }
  // Coordinate of this element along the rolled dimension.
  const int64_t dim_pos = out_idx % (stride * size) / stride;
  // Elements in the last `start` slots wrapped around from the front.
  const int64_t src_idx = (dim_pos >= size - start)
      ? out_idx - (size - start) * stride
      : out_idx + start * stride;
  out_tensor[out_idx] = in_tensor[src_idx];
}
// Roll a tensor along a dimension
// Host entry point: rolls `self` by shifts[0] along dims[0].  Multi-dim
// rolls (or a flattened roll when dims is empty) are delegated to the
// generic roll_common; the single-dim case launches roll_cuda_kernel on a
// contiguous copy of the input.
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
// The kernel's index arithmetic assumes contiguous layout.
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
// Normalize the shift to a non-negative "start" offset in [0, size).
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
// Dispatch over every dtype (incl. half/bool/bfloat16) and launch on the
// current stream; launch errors surface via the kernel-launch check.
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
in_tensor.scalar_type(), "roll_cuda",
[&] {
hipLaunchKernelGGL(( roll_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
}} // namespace at::native
| 818944e35a3cb1a7ee5887284e6448e4206aad79.cu | #include <ATen/native/TensorTransformations.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
// Fast-path flip for flipping exactly the first or the last dimension,
// where the flipped coordinate is recoverable from strides[0] alone
// (linear_index / strides[0] = dim-0 coordinate, % strides[0] = remainder).
// Presumably only dispatched for contiguous inputs -- the dispatching
// caller is not in this file view; verify before reusing elsewhere.
// Uses a grid-stride loop, so any grid size covers all N elements.
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void kernel_pointwise_flip_apply2(
const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
// Destination offset mirrors the flipped coordinate of the source.
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
// Generic flip: one thread per element.  Decomposes the contiguous linear
// index into per-dimension coordinates via strides_contiguous, mirrors each
// coordinate whose dimension is listed in flip_dims (shape[i] - 1 - c), and
// rebuilds the source offset using the input's real strides.
// The four int64 pointer arguments are device-accessible arrays indexed up
// to total_dims (flip_dims up to flip_dims_size).
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void flip_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t* flip_dims,
int64_t flip_dims_size,
int64_t* strides,
int64_t* strides_contiguous,
int64_t* shape,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// cur_indices: the remaining linear index being peeled per dimension;
// dst_offset: accumulated source offset in input strides.
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// One thread per output element: each thread writes out_tensor[gid] with
// the input element that ends up there after the rolled dimension (extent
// `size`, stride `stride`) is shifted forward by `start` slots.  `start` is
// expected in [0, size), as prepared by roll_cuda; roll_dim and total_dims
// are unused in the body and kept only for interface parity.
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void roll_cuda_kernel(
    scalar_t* in_tensor,
    scalar_t* out_tensor,
    int64_t N,
    int64_t roll_dim,
    int64_t start,
    int64_t size,
    int64_t stride,
    int64_t total_dims) {
  const int64_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= N) {
    return;
  }
  // Where this element sits along the rolled dimension.
  const int64_t pos_in_dim = gid % (stride * size) / stride;
  // The tail of the dimension is fed from the front of the input.
  int64_t from_idx;
  if (pos_in_dim >= size - start) {
    from_idx = gid - (size - start) * stride;
  } else {
    from_idx = gid + start * stride;
  }
  out_tensor[gid] = in_tensor[from_idx];
}
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
in_tensor.scalar_type(), "roll_cuda",
[&] {
roll_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
}} // namespace at::native
|
a0e118c9e82e2d1c51172adedacd24c27d57ba39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
#define PI 3.14159265358979f
//these are original values, DO NOT FORGET TO SAVE THEM
__constant__ int oHairCount_device[sizeof(int)];
__constant__ float oHeadRadius_device[sizeof(float)];
__constant__ float oHairLength_device[sizeof(float)];
//__device__ int oHairCount_device;
//__device__ float oHeadRadius_device;
//__device__ float oHairLength_device;
int oHairCount_kernel;
float oHeadRadius_kernel;
float oHairLength_kernel;
//lookup table for sin -- for alpha Radians access alpha/PI*2000 --- for values bigger than PI return -sin(alpha-PI)
//__constant__ float sinT[2001];
//used for setting up the scene
//head position is assumed to be at 0
//headRadius is radius of the head, which is spherical
//hairLength is length of each hairstrand
// Copies the head/hair configuration into the GPU __constant__ symbols and
// mirrors the same values in the host-side globals (*_kernel) so launch
// code (e.g. ApplyWindV0) can validate sizes without a device read.
// NOTE(review): the __constant__ symbols are declared as arrays of
// sizeof(type) elements (e.g. int[4]); only element 0 is written here and
// kernels read it as *oHeadRadius_device -- it works, but the array sizing
// looks unintended.  Confirm before changing the declarations.
void SaveOriginalSettingsGPU(float headRadius, int hairCount, float hairLength)
{
//set the __constant__ values
gpuErrchk(hipMemcpyToSymbol(oHeadRadius_device, &headRadius, sizeof(float)));
gpuErrchk(hipMemcpyToSymbol(oHairCount_device, &hairCount, sizeof(int)));
gpuErrchk(hipMemcpyToSymbol(oHairLength_device, &hairLength, sizeof(float)));
// host-side mirrors used for launch-configuration checks
oHairCount_kernel = hairCount;
oHeadRadius_kernel = headRadius;
oHairLength_kernel = hairLength;
}
//creates look up table for sin
//void CreateSinLookUpTable()
//{
// float sine[2001];
// for (int index = 0; index < 2001; index++)
// {
// sine[index] = sinf(PI * (index - 1000) / 1000.0);
// }
//
// gpuErrchk(hipMemcpyToSymbol(sinT, sine, 2001*sizeof(float)));
//}
//gets hairCount, hairRadius & hair *hairPoints
//sets hairPoints.startingPoints--groups of 32-- to appropriate values
//hair only comes out in 3/4th of head -- look at the bottom of this script for a visualization on where the hair comes out etc.
//returns void, changes hairPoints.startPoints
//gets hairCount, hairRadius & hair *hairPoints
//sets hairPoints.startingPoints--groups of 32-- to appropriate values
//hair only comes out in 3/4th of head
//returns void, changes hairPoints.startPoints
//
// The rand() call sequence is deliberately kept identical to the original
// (seeded with time(0)), since each group of 32 strands is derived from the
// first strand of the group.
void HairPointASetterGPU(int hairCount, float headRadius, hair *hairPoints)
{
// BUG FIX: the original guard was `sizeof(hairPoints) == 0`, which compares
// the size of a pointer (never 0) and therefore never fired; a NULL
// hairPoints crashed below.  Check the pointer itself instead.
if (hairPoints == NULL)
{
std::cout << "hairPoints haven't been malloc'd when calling HairPointASetter" << std::endl;
return;
}
float3 v;
float remainingDist;// used for calculating positions of y and x correctly
srand(static_cast <unsigned> (time(0))); //seed random
for (int i =0; i <hairCount; i+=32)
{
//z can be [-headRadius,headRadius]
v.z = -headRadius + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (2 * headRadius)));
float z = v.z;//used for creating 31 similar hairs
remainingDist = sqrtf(headRadius * headRadius - v.z*v.z);
if (v.z > 0)// y cant be negative if z is positive ##look at the bottom of the script for explanation
{
if (remainingDist == 0)v.y = 0;//division by 0 prevention
else v.y = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (2 * remainingDist))); //y can be [0,remainingDist]
}
else
{
//y can be [-remainingDist,remainingDist]
if (remainingDist == 0) v.y = 0;//division by 0 prevention
else v.y = -remainingDist + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (2 * remainingDist)));
}
float y = v.y;//used for creating 31 similar hairs
remainingDist = sqrtf(headRadius * headRadius - v.z*v.z - v.y*v.y);
//x can be -remainingDist or remainingDist
if (remainingDist == 0) v.x = 0;//divison by 0 prevention
else v.x = remainingDist * (static_cast<float>(rand()) > (static_cast<float> (RAND_MAX) / 2.0f) ? 1 : -1);
float x = v.x;//used for creating 31 similar hairs
// NOTE(review): this rejection test compares radius^2 to length(v), not
// length(v)^2 -- looks dimensionally inconsistent, but it is kept as-is
// because "fixing" it would change which points get rejected.  Confirm
// the intended condition.
if (isnan(v.x) || isnan(v.y) || isnan(v.z) || headRadius * headRadius - length(v) < 0)//recalculate
{
i-=32;
continue;
}
//set starting point
hairPoints[i].startPoint = v;
//we created a hair now create 31 similar hairs
for (int j = 1; j < 32; j++)
{
// NOTE(review): the comment below says the z change is [-0.1f,0.1f],
// but rand()/(RAND_MAX/0.2f) is in [0,0.2f] -- one-sided.  Confirm.
v.z = z + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (0.2f)));//z change can be [-0.1f,0.1f]
remainingDist = sqrtf(headRadius * headRadius - v.z*v.z);
if (v.z > 0)// y cant be negative if z is positive ##look at the bottom of the script for explanation
{
if (remainingDist == 0)v.y = 0;//division by 0 prevention
else v.y = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (2 * remainingDist))); //y can be [0,remainingDist]
}
else
{
//y can be [-remainingDist,remainingDist]
if (remainingDist == 0) v.y = 0;//division by 0 prevention
else v.y = -remainingDist + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (2 * remainingDist)));
}
// keep the sign of y consistent with the group's first strand
if ((y > 0 && v.y < 0) || (y < 0 && v.y>0)) v.y *= -1;
remainingDist = sqrtf(headRadius * headRadius - v.z*v.z - v.y*v.y);
//x can be -remainingDist or remainingDist
if (remainingDist == 0) v.x = 0;//divison by 0 prevention
else v.x = remainingDist * (static_cast<float>(rand()) > (static_cast<float> (RAND_MAX) / 2.0f) ? 1 : -1);
// keep the sign of x consistent with the group's first strand
if ((x > 0 && v.x < 0) || (x < 0 && v.x>0)) v.x *= -1;
if (isnan(v.x) || isnan(v.y) || isnan(v.z) || headRadius * headRadius - length(v) < 0)//recalculate
{
j--;
continue;
}
//set starting point
hairPoints[i+j].startPoint = v;
}
}
}
//hair points is pointer to hair data
//wind origin is a char {+-X, +-Y, +-Z}, denotes the origin point of the wind, i.e wind blows from wind origin towards head
//wind strength is a float used for determining the amount each individual hair moves-rotates
//hairPartitionSize is the hairs that each
//smoothing is the amount of steps required for each hair strand to reach peak position
//recieves hairCount, headRadius from global variable(s) oHairCount, oHeadRadius
//TODO additional feature: can use this routine multiple times without stopping all winds
//returns void*(must), changes the direction-rotation of hair OVER 'TIME' UP TO PEAK POSITION depending on smoothing by calling startWind method on GPU
// Host-side driver: validates inputs, then ramps the wind strength up over
// `smoothing` steps, launching StartWindZV2 followed by CollisionDetectionV1
// once per step.  Only Z-axis winds are implemented; X/Y print a message and
// bail out.  blockSize*blockCount must equal the saved hair count so that
// each GPU thread maps to exactly one strand.
// NOTE(review): there is no hipDeviceSynchronize() after the loop, so the
// launches may still be queued when this returns -- confirm a sync happens
// downstream before the results are consumed.
void ApplyWindV0(hair *hairPoints,wind w, int blockSize, int blockCount, int smoothing)
{
if (blockSize*blockCount != oHairCount_kernel)
{
std::cout<<"block size * block count != hair count when calling ApplyWindV0"<<std::endl;
//exit(-1);
return;
}
if (w.strength == 0)
{
// zero-strength wind is a no-op
return;
}
if (hairPoints == NULL)
{
std::cout << "hairPoints is not malloc'd when calling ApplyWindV0";
//exit(-1);
return;
}
if ((w.axis != 'X' && w.axis != 'Y' && w.axis != 'Z') || (w.axisSign != '+' && w.axisSign != '-'))
{
std::cout << "wind is not set correctly when calling ApplyWindV0";
//exit(-1);
return;
}
for (int j = 0; j<smoothing; j++)//at each smoothing step
{
// linear ramp: step j applies (j+1)/smoothing of the full strength
float smoothedStrength = w.strength*(j + 1) / smoothing;
//call appropriate function based on wind direction
if (w.axis == 'X')
{
std::cout << "X winds are not implemented yet" << std::endl; return;
}
else if (w.axis == 'Y')
{
std::cout << "Y winds are not implemented yet" << std::endl; return;
}
else//w.axis=='Z'
{
hipLaunchKernelGGL(( StartWindZV2) , dim3(blockCount), dim3(blockSize) , 0, 0, hairPoints, w.axisSign, smoothedStrength);
gpuErrchk(hipPeekAtLastError());
hipLaunchKernelGGL(( CollisionDetectionV1) , dim3(blockCount),dim3(blockSize), 0, 0, hairPoints);
gpuErrchk(hipPeekAtLastError());
}
}
}
//look at V0 for more info on function
//trying to unroll loops
// One thread per hair strand (tid indexes hairPoints directly; the launch
// must cover exactly the hair count -- enforced by ApplyWindV0).  Bends the
// strand toward the Z wind: sign '+' moves the end point toward -Z, '-'
// toward +Z, by an amount scaled by `strength` and the strand's z position.
// V2 differs from V1 only by #pragma unroll on the per-point loops.
// NOTE(review): in-kernel malloc results are not NULL-checked; a failed
// device-heap allocation would fault.  Confirm the device heap is sized for
// 2 * interpolatedPointSize * sizeof(float3) per thread.
__global__ void StartWindZV2(hair *hairPoints, char sign, float strength)
{
//look at applywind()--cpu implementation-- for more info
int tid = blockDim.x*blockIdx.x + threadIdx.x;
//change y,z points, x is always same
//if sign is +, z point gets decreased vice versa
//find effective wind strength--same on all points of the hair
#define effectiveStrengthV2 ((0.75f + (hairPoints[tid].startPoint.z) / (4 * *oHeadRadius_device))*strength)
//TODO possible performance improvement: instead of calculating Zdist&y point seperataly, do sin cos calculations(like rotation)
//calculate nEndPoint
float3 nEndPoint;
nEndPoint.x = hairPoints[tid].startPoint.x;
nEndPoint.y = (hairPoints[tid].startPoint.y - hairPoints[tid].endPoint.y)*effectiveStrengthV2 + hairPoints[tid].endPoint.y;
#define ZdistV2 (sqrtf(*oHairLength_device * *oHairLength_device - (nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y)))
if (sign == '+')
{
nEndPoint.z = hairPoints[tid].startPoint.z - ZdistV2;
}
else//sign == '-'
{
nEndPoint.z = hairPoints[tid].startPoint.z + ZdistV2;
}
// scratch buffers for the raw (non-smooth) and angle-smoothed targets
float3 * nNonSmoothInterpolatedPoints, *nSmoothInterpolatedPoints;
nNonSmoothInterpolatedPoints = (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
nSmoothInterpolatedPoints = (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
//calculate nNonSmoothInterpolatedPoints for each interpolated point, then set nSmoothInterpolatedPoints
#pragma unroll
for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
{
//smoothPoint calculations
nNonSmoothInterpolatedPoints[i] = (nEndPoint - hairPoints[tid].startPoint)*((float)(i + 1)) / ((float)(hairPoints[tid].interpolatedPointSize + 1)) + hairPoints[tid].startPoint;
float nRad = atan2f(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y, nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z);//the angle(in radians) of nonSmoothInterpolatedPoint[i]-startingPoint from the +Z axis(counterclockwise)
float rad = atan2f(hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y, hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z);//the angle(in radians) of interpolatedPoints[i]-startingPoint from +Z axis(counterclockwise)
float YZdistToStart = sqrtf((nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)*(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)
+ (nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z)*(nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z));
nSmoothInterpolatedPoints[i].x = nNonSmoothInterpolatedPoints[i].x;
nSmoothInterpolatedPoints[i].y = YZdistToStart * sinf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.y;//equally divides the angle between nonSmoothInterpolatedPoints & interpolatedPoints, then sets the angle of smoothPoint[i] as i'th step between nonSmoothInterpolatedPoint& interpolatedPoints
nSmoothInterpolatedPoints[i].z = YZdistToStart * cosf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.z;
}
//move hair points
float3 nVector;//used for finding vector from one point to another
if (hairPoints[tid].interpolatedPointSize>0) hairPoints[tid].interpolatedPoints[0] = nSmoothInterpolatedPoints[0];
#pragma unroll
for (int i = 1; i < hairPoints[tid].interpolatedPointSize; i++)
{
//find NORMALIZED vector from interpoaltedPoints[i-1] to nSmoothInterpolatedPoints[i]
nVector = normalize(nSmoothInterpolatedPoints[i] - hairPoints[tid].interpolatedPoints[i - 1]);
//set interpolatedPoints[i] to interpolatedPoints[i-1] + nVector * hairLength/(intepolatedPointSize+1)
hairPoints[tid].interpolatedPoints[i] = hairPoints[tid].interpolatedPoints[i - 1] + (nVector / (float)(hairPoints[tid].interpolatedPointSize + 1))*sqrtf(
(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
}
//set endPoint
if (hairPoints[tid].interpolatedPointSize > 0)
{
nVector = normalize(nEndPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1]);
hairPoints[tid].endPoint = hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] + nVector / (float)(hairPoints[tid].interpolatedPointSize + 1)*sqrtf(
(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
}
else
{
//no interpolation:=the hair will be a single line
hairPoints[tid].endPoint = nEndPoint;
}
//correct points
float angleInRad = atan2f((hairPoints[tid].endPoint - hairPoints[tid].startPoint).y, (hairPoints[tid].endPoint - hairPoints[tid].startPoint).z);//angle between current endPoint& +Z axis counterclockwise
float nAngleInRad = atan2f((nEndPoint - hairPoints[tid].startPoint).y, (nEndPoint - hairPoints[tid].startPoint).z); //angle between nEndPoint& +Z axis counterclockwise
float offsetAngleInRad = nAngleInRad - angleInRad; //rotate the hair this much counterclockwise
/*
2D rotating of a point around origin counterclockwise :
x' = x cos f - y sin f
y' = y cos f + x sin f
*/
//rotate endPoint & all interpolatedPoints offsetAngle degrees around startingPoint counterclockwise
float3 nPoint;//used for saving point info
nPoint.x = hairPoints[tid].endPoint.x;//x is 'same' on all points of hair, i.e when there is another wind that changes x direction this will POSSIBLY make things wrong
nPoint.y = (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.y;
nPoint.z = (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.z;
hairPoints[tid].endPoint = nPoint;
#pragma unroll
for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
{
nPoint.x = hairPoints[tid].interpolatedPoints[i].x;
nPoint.y = (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.y;
nPoint.z = (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.z;
hairPoints[tid].interpolatedPoints[i] = nPoint;
}
//call collision detection @ ApplyWind
//free
free(nNonSmoothInterpolatedPoints);
free(nSmoothInterpolatedPoints);
}
//look at V0 for more info on function
//trying to reduce register count from 48 to 32 since it brings occupancy 63->94
//FAILED -> trying to limit register count in the compiler settings did not cause decrease in register count
// One thread per hair strand.  Same algorithm as StartWindZV2 but without
// the #pragma unroll annotations: bends the strand toward the Z wind
// (sign '+' pushes the end point toward -Z, '-' toward +Z) by an amount
// scaled by `strength` and the strand's z position.
// NOTE(review): in-kernel malloc results are not NULL-checked; a failed
// device-heap allocation would fault.
__global__ void StartWindZV1(hair *hairPoints, char sign, float strength)
{
//look at applywind()--cpu implementation-- for more info
int tid = blockDim.x*blockIdx.x + threadIdx.x;
//change y,z points, x is always same
//if sign is +, z point gets decreased vice versa
//find effective wind strength--same on all points of the hair
#define effectiveStrengthV1 ((0.75f + (hairPoints[tid].startPoint.z) / (4 * *oHeadRadius_device))*strength)
//TODO possible performance improvement: instead of calculating Zdist&y point seperataly, do sin cos calculations(like rotation)
//calculate nEndPoint
float3 nEndPoint;
nEndPoint.x = hairPoints[tid].startPoint.x;
nEndPoint.y = (hairPoints[tid].startPoint.y - hairPoints[tid].endPoint.y)*effectiveStrengthV1 + hairPoints[tid].endPoint.y;
#define ZdistV1 (sqrtf(*oHairLength_device * *oHairLength_device - (nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y)))
if (sign == '+')
{
nEndPoint.z = hairPoints[tid].startPoint.z - ZdistV1;
}
else//sign == '-'
{
nEndPoint.z = hairPoints[tid].startPoint.z + ZdistV1;
}
// scratch buffers for the raw (non-smooth) and angle-smoothed targets
float3 * nNonSmoothInterpolatedPoints, *nSmoothInterpolatedPoints;
nNonSmoothInterpolatedPoints = (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
nSmoothInterpolatedPoints = (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
//calculate nNonSmoothInterpolatedPoints for each interpolated point, then set nSmoothInterpolatedPoints
for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
{
//smoothPoint calculations
nNonSmoothInterpolatedPoints[i] = (nEndPoint - hairPoints[tid].startPoint)*((float)(i + 1)) / ((float)(hairPoints[tid].interpolatedPointSize + 1)) + hairPoints[tid].startPoint;
float nRad = atan2f(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y, nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z);//the angle(in radians) of nonSmoothInterpolatedPoint[i]-startingPoint from the +Z axis(counterclockwise)
float rad = atan2f(hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y, hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z);//the angle(in radians) of interpolatedPoints[i]-startingPoint from +Z axis(counterclockwise)
float YZdistToStart = sqrtf((nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)*(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)
+ (nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z)*(nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z));
nSmoothInterpolatedPoints[i].x = nNonSmoothInterpolatedPoints[i].x;
nSmoothInterpolatedPoints[i].y = YZdistToStart * sinf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.y;//equally divides the angle between nonSmoothInterpolatedPoints & interpolatedPoints, then sets the angle of smoothPoint[i] as i'th step between nonSmoothInterpolatedPoint& interpolatedPoints
nSmoothInterpolatedPoints[i].z = YZdistToStart * cosf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.z;
}
//move hair points
float3 nVector;//used for finding vector from one point to another
if (hairPoints[tid].interpolatedPointSize>0) hairPoints[tid].interpolatedPoints[0] = nSmoothInterpolatedPoints[0];
for (int i = 1; i < hairPoints[tid].interpolatedPointSize; i++)
{
//find NORMALIZED vector from interpoaltedPoints[i-1] to nSmoothInterpolatedPoints[i]
nVector = normalize(nSmoothInterpolatedPoints[i] - hairPoints[tid].interpolatedPoints[i - 1]);
//set interpolatedPoints[i] to interpolatedPoints[i-1] + nVector * hairLength/(intepolatedPointSize+1)
hairPoints[tid].interpolatedPoints[i] = hairPoints[tid].interpolatedPoints[i - 1] + (nVector / (float)(hairPoints[tid].interpolatedPointSize + 1))*sqrtf(
(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
}
//set endPoint
if (hairPoints[tid].interpolatedPointSize > 0)
{
nVector = normalize(nEndPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1]);
hairPoints[tid].endPoint = hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] + nVector / (float)(hairPoints[tid].interpolatedPointSize + 1)*sqrtf(
(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
}
else
{
//no interpolation:=the hair will be a single line
hairPoints[tid].endPoint = nEndPoint;
}
//correct points
float angleInRad = atan2f((hairPoints[tid].endPoint - hairPoints[tid].startPoint).y, (hairPoints[tid].endPoint - hairPoints[tid].startPoint).z);//angle between current endPoint& +Z axis counterclockwise
float nAngleInRad = atan2f((nEndPoint - hairPoints[tid].startPoint).y, (nEndPoint - hairPoints[tid].startPoint).z); //angle between nEndPoint& +Z axis counterclockwise
float offsetAngleInRad = nAngleInRad - angleInRad; //rotate the hair this much counterclockwise
/*
2D rotating of a point around origin counterclockwise :
x' = x cos f - y sin f
y' = y cos f + x sin f
*/
//rotate endPoint & all interpolatedPoints offsetAngle degrees around startingPoint counterclockwise
float3 nPoint;//used for saving point info
nPoint.x = hairPoints[tid].endPoint.x;//x is 'same' on all points of hair, i.e when there is another wind that changes x direction this will POSSIBLY make things wrong
nPoint.y = (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.y;
nPoint.z = (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.z;
hairPoints[tid].endPoint = nPoint;
for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
{
nPoint.x = hairPoints[tid].interpolatedPoints[i].x;
nPoint.y = (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.y;
nPoint.z = (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.z;
hairPoints[tid].interpolatedPoints[i] = nPoint;
}
//call collision detection @ ApplyWind
//free
free(nNonSmoothInterpolatedPoints);
free(nSmoothInterpolatedPoints);
}
//each thread does work on a single hair strand (one thread == one strand, indexed by flat thread id)
//gets hairPoints, wind axis sign, wind strength
//receives hair length from oHairLength_device & head radius from oHeadRadius_device -- saved @ SaveOriginalSettingsGPU
//hairPoints is a pointer to the device array of per-strand hair data
//sign ('+' or '-') determines the clockwise/counterclockwise'ness of the hair rotation
//strength determines the angle of the hair rotation
//returns void, changes positions of ONE hair strand (endPoint & all interpolatedPoints) for a Z-direction wind
//NOTE(review): no bounds check on tid -- assumes gridDim*blockDim == hair count (the host wrapper enforces this); confirm before reusing with other launch configs
//other versions are StartWindY, StartWindX (not implemented)
__global__ void StartWindZV0(hair *hairPoints, char sign, float strength)
{
//look at applywind()--cpu implementation-- for more info
int tid = blockDim.x*blockIdx.x + threadIdx.x;
//only y,z coordinates change; x is always the same
//if sign is '+', the z coordinate gets decreased, and vice versa
//find effective wind strength -- same on all points of one hair; grows with the root's z coordinate
float effectiveStrength = (0.75f + (hairPoints[tid].startPoint.z) / (4 * *oHeadRadius_device))*strength;
//TODO possible performance improvement: instead of calculating Zdist & the y coordinate separately, do sin/cos calculations (like the rotation below)
//calculate nEndPoint: the target position of the strand's end point at this wind strength
float3 nEndPoint;
nEndPoint.x = hairPoints[tid].startPoint.x;
//lerp the end point's y toward the root's y by effectiveStrength
nEndPoint.y = (hairPoints[tid].startPoint.y - hairPoints[tid].endPoint.y)*effectiveStrength + hairPoints[tid].endPoint.y;
//choose z so the end point stays exactly one hair length from the root within the YZ plane
float Zdist = sqrtf(*oHairLength_device * *oHairLength_device - (nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y));
if (sign == '+')
{
nEndPoint.z = hairPoints[tid].startPoint.z - Zdist;
}
else//sign == '-'
{
nEndPoint.z = hairPoints[tid].startPoint.z + Zdist;
}
//scratch buffers on the device heap, freed at the bottom; NOTE(review): malloc results are not NULL-checked
float3 * nNonSmoothInterpolatedPoints, *nSmoothInterpolatedPoints;
nNonSmoothInterpolatedPoints= (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
nSmoothInterpolatedPoints = (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
//calculate nNonSmoothInterpolatedPoints for each interpolated point, then set nSmoothInterpolatedPoints
for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
{
//non-smooth target: the i'th point evenly spaced on the straight root->nEndPoint line
nNonSmoothInterpolatedPoints[i] = (nEndPoint - hairPoints[tid].startPoint)*((float)(i + 1)) / ((float)(hairPoints[tid].interpolatedPointSize + 1)) + hairPoints[tid].startPoint;
float nRad = atan2f(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y, nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z);//the angle (in radians) of nonSmoothInterpolatedPoints[i]-startPoint from the +Z axis (counterclockwise)
float rad = atan2f(hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y, hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z);//the angle (in radians) of interpolatedPoints[i]-startPoint from the +Z axis (counterclockwise)
float YZdistToStart = sqrtf((nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)*(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)
+ (nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z)*(nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z));
nSmoothInterpolatedPoints[i].x = nNonSmoothInterpolatedPoints[i].x;
nSmoothInterpolatedPoints[i].y = YZdistToStart * sinf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.y;//equally divides the angle between the non-smooth target & the current point, then sets the smooth point's angle as the i'th step of that division
nSmoothInterpolatedPoints[i].z = YZdistToStart * cosf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.z;
}
//move hair points: walk down the strand, aiming each point at its smooth target while keeping segment lengths equal
float3 nVector;//used for finding the vector from one point to another
if (hairPoints[tid].interpolatedPointSize>0) hairPoints[tid].interpolatedPoints[0] = nSmoothInterpolatedPoints[0];
for (int i = 1; i < hairPoints[tid].interpolatedPointSize; i++)
{
//find the NORMALIZED vector from interpolatedPoints[i-1] to nSmoothInterpolatedPoints[i]
nVector = normalize(nSmoothInterpolatedPoints[i] - hairPoints[tid].interpolatedPoints[i - 1]);
//set interpolatedPoints[i] to interpolatedPoints[i-1] + nVector * (YZ distance root->nEndPoint)/(interpolatedPointSize+1)
hairPoints[tid].interpolatedPoints[i] = hairPoints[tid].interpolatedPoints[i - 1] + (nVector / (float)(hairPoints[tid].interpolatedPointSize + 1))*sqrtf(
(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
}
//set endPoint the same way, walking from the last interpolated point toward nEndPoint
if (hairPoints[tid].interpolatedPointSize > 0)
{
nVector = normalize(nEndPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1]);
hairPoints[tid].endPoint = hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] + nVector / (float)(hairPoints[tid].interpolatedPointSize + 1)*sqrtf(
(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
}
else
{
//no interpolation := the hair will be a single line
hairPoints[tid].endPoint = nEndPoint;
}
//correct points: the walk above can leave the strand at a slightly wrong angle, so rotate the
//whole strand about the root until the end point lies in nEndPoint's direction
float angleInRad = atan2f((hairPoints[tid].endPoint - hairPoints[tid].startPoint).y, (hairPoints[tid].endPoint - hairPoints[tid].startPoint).z);//angle between current endPoint & +Z axis, counterclockwise
float nAngleInRad = atan2f((nEndPoint - hairPoints[tid].startPoint).y, (nEndPoint - hairPoints[tid].startPoint).z); //angle between nEndPoint & +Z axis, counterclockwise
float offsetAngleInRad = nAngleInRad - angleInRad; //rotate the hair this much counterclockwise
/*
2D rotation of a point around the origin, counterclockwise, applied here in the (z, y) plane:
z' = z cos f - y sin f
y' = y cos f + z sin f
*/
//rotate endPoint & all interpolatedPoints by offsetAngleInRad around startPoint, counterclockwise
float3 nPoint;//used for saving point info
nPoint.x = hairPoints[tid].endPoint.x;//x is 'same' on all points of hair, i.e. when there is another wind that changes the x direction this will POSSIBLY make things wrong
nPoint.y = (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.y;
nPoint.z = (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.z;
hairPoints[tid].endPoint = nPoint;
for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
{
nPoint.x = hairPoints[tid].interpolatedPoints[i].x;
nPoint.y = (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.y;
nPoint.z = (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.z;
hairPoints[tid].interpolatedPoints[i] = nPoint;
}
//collision detection is invoked separately @ ApplyWind
//free the scratch buffers
free(nNonSmoothInterpolatedPoints);
free(nSmoothInterpolatedPoints);
}
//Collision response between one hair strand and the spherical head; one thread per strand.
//Same algorithm as CollisionDetectionV0 but with #pragma unroll hints on the inner loops.
//hairPoints: device array of strands, indexed by the flat thread id.
//Pushes every colliding point (except the root) out to 0.1f beyond the head surface while
//preserving the segment vectors further down the strand; only y/z change, x never does.
//NOTE(review): no tid bounds check -- assumes gridDim*blockDim == hair count (host wrapper enforces this).
__global__ void CollisionDetectionV1(hair* hairPoints)
{
	int tid = blockDim.x*blockIdx.x + threadIdx.x;
	//the head is a sphere centered at the origin and the strand lives in a fixed-X plane,
	//so collisions are checked against the minor circle of the sphere at that X
	float3 minorCenter;//center of the minor circle that passes through startPoint
	minorCenter.x = hairPoints[tid].startPoint.x;
	minorCenter.y = 0;
	minorCenter.z = 0;
	float minorLength = length(hairPoints[tid].startPoint - minorCenter);//radius of that minor circle
	//every point except the root must stay at least 0.1f outside the head
	//check the interpolatedPoints
	#pragma unroll
	for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
	{
		if (length(hairPoints[tid].interpolatedPoints[i] - minorCenter) < minorLength + 0.1f)//collision detected
		{
			//strategy:
			//1) rotate this point vertically out of the head (or let it hang straight down)
			//2) translate every later point (and the end point) so that the vector between
			//   consecutive points is preserved
			float n;//distance between this point and the previous point on the strand
			float gammaRad;//angle (radians) of the previous point from the +Z axis, counterclockwise
			float alphaPrimeRad;//angle (radians) between the new point and the previous point, seen from minorCenter
			if (i == 0)//first interpolated point is inside the head; the previous point is the root
			{
				n = length(hairPoints[tid].interpolatedPoints[0] - hairPoints[tid].startPoint);
				//law of cosines on the triangle with edges (minorLength, minorLength+0.1f, n)
				alphaPrimeRad = acosf((minorLength*minorLength + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
					(2 * minorLength*(minorLength + 0.1f)));
				if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf returns NaN on a degenerate triangle
				gammaRad = atan2f(hairPoints[tid].startPoint.y, hairPoints[tid].startPoint.z);
			}
			else//the previous point is interpolatedPoints[i-1]
			{
				n = length(hairPoints[tid].interpolatedPoints[i] - hairPoints[tid].interpolatedPoints[i - 1]);
				//law of cosines on the triangle with edges (prevDistance, minorLength+0.1f, n)
				float prevDistance = length(hairPoints[tid].interpolatedPoints[i - 1] - minorCenter);
				alphaPrimeRad = acosf((prevDistance*prevDistance + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
					(2 * prevDistance * (minorLength + 0.1f)));
				if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;
				gammaRad = atan2f(hairPoints[tid].interpolatedPoints[i - 1].y, hairPoints[tid].interpolatedPoints[i - 1].z);
			}
			float3 prevPoint = (i > 0 ? hairPoints[tid].interpolatedPoints[i - 1] : hairPoints[tid].startPoint);//the point that comes before this point
			float3 newPoint;
			newPoint.x = hairPoints[tid].interpolatedPoints[i].x;//x never changes
			if ((prevPoint.y - n)*(prevPoint.y - n) + prevPoint.z*prevPoint.z>(minorLength + 0.1f)*(minorLength + 0.1f))//hanging straight down stays outside the head
			{
				newPoint.y = prevPoint.y - n;
				newPoint.z = prevPoint.z;
			}
			else//otherwise the hair follows the curvature of the head
			{
				newPoint.y = (minorLength + 0.1f)*(sinf(alphaPrimeRad + gammaRad));
				newPoint.z = (minorLength + 0.1f)*(cosf(alphaPrimeRad + gammaRad));
			}
			//record the vector between each pair of consecutive later points (endPoint included)...
			float3 *moveVectors;//change of coordinates between consecutive points, including endPoint
			moveVectors = (float3*)malloc(sizeof(float3)*(hairPoints[tid].interpolatedPointSize - i));
			#pragma unroll
			for (int j = i + 1; j < hairPoints[tid].interpolatedPointSize; j++)
			{
				moveVectors[j - i - 1] = hairPoints[tid].interpolatedPoints[j] - hairPoints[tid].interpolatedPoints[j - 1];
			}
			moveVectors[hairPoints[tid].interpolatedPointSize - i - 1] = hairPoints[tid].endPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1];//vector from last interpolated point to endPoint
			//set this point as the new point
			hairPoints[tid].interpolatedPoints[i] = newPoint;
			//...then rebuild the tail of the strand from the corrected point
			#pragma unroll
			for (int j = i + 1; j < hairPoints[tid].interpolatedPointSize; j++) hairPoints[tid].interpolatedPoints[j] = hairPoints[tid].interpolatedPoints[j - 1] + moveVectors[j - i - 1];
			hairPoints[tid].endPoint = hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] + moveVectors[hairPoints[tid].interpolatedPointSize - i - 1];
			free(moveVectors);
		}
	}
	//finally check the endPoint
	if (length(hairPoints[tid].endPoint - minorCenter) < minorLength + 0.1f)
	{
		float n;//distance between the endPoint and the previous point
		float gammaRad;//angle (radians) of the previous point from the +Z axis, counterclockwise
		float alphaPrimeRad;//angle (radians) between the new endPoint and the previous point, seen from minorCenter
		if (hairPoints[tid].interpolatedPointSize > 0)//there is at least 1 interpolated point
		{
			n = length(hairPoints[tid].endPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1]);
			gammaRad = atan2f(hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1].y, hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1].z);
			//law of cosines on the triangle with edges (prevDistance, minorLength+0.1f, n)
			float prevDistance = length(hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] - minorCenter);
			alphaPrimeRad = acosf((prevDistance*prevDistance + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
				(2 * prevDistance * (minorLength + 0.1f)));
			if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf can return NaN
		}
		else//no interpolation --> the previous point is the root
		{
			n = length(hairPoints[tid].endPoint - hairPoints[tid].startPoint);
			gammaRad = atan2f(hairPoints[tid].startPoint.y, hairPoints[tid].startPoint.z);
			//BUGFIX: the law-of-cosines numerator is a^2 + b^2 - n^2; the previous code used
			//2*a*b - n^2, which only matches when a == b (now consistent with the i == 0 case above)
			alphaPrimeRad = acosf((minorLength*minorLength + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
				(2 * minorLength*(minorLength + 0.1f)));
			if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf can return NaN
		}
		float3 prevPoint = (hairPoints[tid].interpolatedPointSize > 0 ? hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] : hairPoints[tid].startPoint);//the point that comes before the endPoint
		//rotate the endPoint out of the head
		if ((prevPoint.y - n)*(prevPoint.y - n) + prevPoint.z*prevPoint.z>(minorLength + 0.1f)*(minorLength + 0.1f))//hair falls straight down
		{
			hairPoints[tid].endPoint.y = prevPoint.y - n;
			hairPoints[tid].endPoint.z = prevPoint.z;
		}
		else//hair follows the curvature of the head
		{
			hairPoints[tid].endPoint.y = (minorLength + 0.1f)*(sinf(alphaPrimeRad + gammaRad));
			hairPoints[tid].endPoint.z = (minorLength + 0.1f)*(cosf(alphaPrimeRad + gammaRad));
		}
	}
}
//gets hairPoints; one thread handles one hair strand (flat thread id).
//Receives the head geometry implicitly: the head is a sphere centered at the origin.
//If any point on the hair (except the root) is inside the head, rotates/slides it out
//in the +Y direction; x coordinates are NEVER changed.
//Returns void, rewrites the positions of the points that were inside the head.
//NOTE(review): no tid bounds check -- assumes gridDim*blockDim == hair count (host wrapper enforces this).
__global__ void CollisionDetectionV0(hair* hairPoints)
{
	int tid = blockDim.x*blockIdx.x + threadIdx.x;
	//the strand lives in a fixed-X plane, so collisions are checked against the minor
	//circle of the head sphere at that X
	float3 minorCenter;//center of the minor circle that passes through startPoint
	minorCenter.x = hairPoints[tid].startPoint.x;
	minorCenter.y = 0;
	minorCenter.z = 0;
	float minorLength = length(hairPoints[tid].startPoint - minorCenter);//radius of that minor circle
	//every point except the root must stay at least 0.1f outside the head
	//check the interpolatedPoints
	for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
	{
		if (length(hairPoints[tid].interpolatedPoints[i] - minorCenter) < minorLength + 0.1f)//collision detected
		{
			//strategy:
			//1) rotate this point vertically out of the head (or let it hang straight down)
			//2) translate every later point (and the end point) so that the vector between
			//   consecutive points is preserved
			float n;//distance between this point and the previous point on the strand
			float gammaRad;//angle (radians) of the previous point from the +Z axis, counterclockwise
			float alphaPrimeRad;//angle (radians) between the new point and the previous point, seen from minorCenter
			if (i == 0)//first interpolated point is inside the head; the previous point is the root
			{
				n = length(hairPoints[tid].interpolatedPoints[0] - hairPoints[tid].startPoint);
				//law of cosines on the triangle with edges (minorLength, minorLength+0.1f, n)
				alphaPrimeRad = acosf((minorLength*minorLength + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
					(2 * minorLength*(minorLength + 0.1f)));
				if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf returns NaN on a degenerate triangle
				gammaRad = atan2f(hairPoints[tid].startPoint.y, hairPoints[tid].startPoint.z);
			}
			else//the previous point is interpolatedPoints[i-1]
			{
				n = length(hairPoints[tid].interpolatedPoints[i] - hairPoints[tid].interpolatedPoints[i - 1]);
				//law of cosines on the triangle with edges (prevDistance, minorLength+0.1f, n)
				float prevDistance = length(hairPoints[tid].interpolatedPoints[i - 1] - minorCenter);
				alphaPrimeRad = acosf((prevDistance*prevDistance + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
					(2 * prevDistance * (minorLength + 0.1f)));
				if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;
				gammaRad = atan2f(hairPoints[tid].interpolatedPoints[i - 1].y, hairPoints[tid].interpolatedPoints[i - 1].z);
			}
			float3 prevPoint = (i > 0 ? hairPoints[tid].interpolatedPoints[i - 1] : hairPoints[tid].startPoint);//the point that comes before this point
			float3 newPoint;
			newPoint.x = hairPoints[tid].interpolatedPoints[i].x;//x never changes
			if ((prevPoint.y - n)*(prevPoint.y - n) + prevPoint.z*prevPoint.z>(minorLength + 0.1f)*(minorLength + 0.1f))//hanging straight down stays outside the head
			{
				newPoint.y = prevPoint.y - n;
				newPoint.z = prevPoint.z;
			}
			else//otherwise the hair follows the curvature of the head
			{
				newPoint.y = (minorLength + 0.1f)*(sinf(alphaPrimeRad + gammaRad));
				newPoint.z = (minorLength + 0.1f)*(cosf(alphaPrimeRad + gammaRad));
			}
			//record the vector between each pair of consecutive later points (endPoint included)...
			float3 *moveVectors;//change of coordinates between consecutive points, including endPoint
			moveVectors = (float3*)malloc(sizeof(float3)*(hairPoints[tid].interpolatedPointSize - i));
			for (int j = i + 1; j < hairPoints[tid].interpolatedPointSize; j++)
			{
				moveVectors[j - i - 1] = hairPoints[tid].interpolatedPoints[j] - hairPoints[tid].interpolatedPoints[j - 1];
			}
			moveVectors[hairPoints[tid].interpolatedPointSize - i - 1] = hairPoints[tid].endPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1];//vector from last interpolated point to endPoint
			//set this point as the new point
			hairPoints[tid].interpolatedPoints[i] = newPoint;
			//...then rebuild the tail of the strand from the corrected point
			for (int j = i + 1; j < hairPoints[tid].interpolatedPointSize; j++) hairPoints[tid].interpolatedPoints[j] = hairPoints[tid].interpolatedPoints[j - 1] + moveVectors[j - i - 1];
			hairPoints[tid].endPoint = hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] + moveVectors[hairPoints[tid].interpolatedPointSize - i - 1];
			free(moveVectors);
		}
	}
	//finally check the endPoint
	if (length(hairPoints[tid].endPoint - minorCenter) < minorLength + 0.1f)
	{
		float n;//distance between the endPoint and the previous point
		float gammaRad;//angle (radians) of the previous point from the +Z axis, counterclockwise
		float alphaPrimeRad;//angle (radians) between the new endPoint and the previous point, seen from minorCenter
		if (hairPoints[tid].interpolatedPointSize > 0)//there is at least 1 interpolated point
		{
			n = length(hairPoints[tid].endPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1]);
			gammaRad = atan2f(hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1].y, hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1].z);
			//law of cosines on the triangle with edges (prevDistance, minorLength+0.1f, n)
			float prevDistance = length(hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] - minorCenter);
			alphaPrimeRad = acosf((prevDistance*prevDistance + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
				(2 * prevDistance * (minorLength + 0.1f)));
			if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf can return NaN
		}
		else//no interpolation --> the previous point is the root
		{
			n = length(hairPoints[tid].endPoint - hairPoints[tid].startPoint);
			gammaRad = atan2f(hairPoints[tid].startPoint.y, hairPoints[tid].startPoint.z);
			//BUGFIX: the law-of-cosines numerator is a^2 + b^2 - n^2; the previous code used
			//2*a*b - n^2, which only matches when a == b (now consistent with the i == 0 case above)
			alphaPrimeRad = acosf((minorLength*minorLength + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
				(2 * minorLength*(minorLength + 0.1f)));
			if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf can return NaN
		}
		float3 prevPoint = (hairPoints[tid].interpolatedPointSize > 0 ? hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] : hairPoints[tid].startPoint);//the point that comes before the endPoint
		//rotate the endPoint out of the head
		if ((prevPoint.y - n)*(prevPoint.y - n) + prevPoint.z*prevPoint.z>(minorLength + 0.1f)*(minorLength + 0.1f))//hair falls straight down
		{
			hairPoints[tid].endPoint.y = prevPoint.y - n;
			hairPoints[tid].endPoint.z = prevPoint.z;
		}
		else//hair follows the curvature of the head
		{
			hairPoints[tid].endPoint.y = (minorLength + 0.1f)*(sinf(alphaPrimeRad + gammaRad));
			hairPoints[tid].endPoint.z = (minorLength + 0.1f)*(cosf(alphaPrimeRad + gammaRad));
		}
	}
}
//Releases the device-side hair array.
//hairPoints: device pointer to the strand array.
//The per-strand interpolatedPoints members are intentionally NOT freed here -- per the
//original note they alias memory that is released together with the host-side data.
void FreeAllGPU(hair *hairPoints)
{
	//only the strand array itself is owned by this pointer
	gpuErrchk(hipFree(hairPoints));
}
| a0e118c9e82e2d1c51172adedacd24c27d57ba39.cu |
#include "kernel.cuh"
#define PI 3.14159265358979f
//these are original values, DO NOT FORGET TO SAVE THEM
__constant__ int oHairCount_device[sizeof(int)];
__constant__ float oHeadRadius_device[sizeof(float)];
__constant__ float oHairLength_device[sizeof(float)];
//__device__ int oHairCount_device;
//__device__ float oHeadRadius_device;
//__device__ float oHairLength_device;
int oHairCount_kernel;
float oHeadRadius_kernel;
float oHairLength_kernel;
//lookup table for sin -- for alpha Radians access alpha/PI*2000 --- for values bigger than PI return -sin(alpha-PI)
//__constant__ float sinT[2001];
//Stores the scene parameters once, both on the device and on the host.
//The head is assumed to be a sphere positioned at the origin.
//headRadius: radius of the spherical head
//hairCount: total number of hair strands
//hairLength: length of each hair strand
//Writes the __constant__ symbols (read by the kernels) and the host-side shadow
//copies (read by the launch-configuration checks in the host wrappers).
void SaveOriginalSettingsGPU(float headRadius, int hairCount, float hairLength)
{
	//host-side shadows first -- used e.g. by ApplyWindV0's block-count validation
	oHeadRadius_kernel = headRadius;
	oHairCount_kernel = hairCount;
	oHairLength_kernel = hairLength;
	//then upload to __constant__ memory so kernels can read them cheaply
	gpuErrchk(cudaMemcpyToSymbol(oHeadRadius_device, &headRadius, sizeof(headRadius)));
	gpuErrchk(cudaMemcpyToSymbol(oHairCount_device, &hairCount, sizeof(hairCount)));
	gpuErrchk(cudaMemcpyToSymbol(oHairLength_device, &hairLength, sizeof(hairLength)));
}
//creates look up table for sin
//void CreateSinLookUpTable()
//{
// float sine[2001];
// for (int index = 0; index < 2001; index++)
// {
// sine[index] = sinf(PI * (index - 1000) / 1000.0);
// }
//
// gpuErrchk(cudaMemcpyToSymbol(sinT, sine, 2001*sizeof(float)));
//}
//gets hairCount, headRadius & hair *hairPoints
//sets hairPoints[i].startPoint -- in groups of 32 similar strands -- to random points on the head sphere
//hair only comes out on 3/4 of the head: when z > 0 (the face side), y must be >= 0
//-- look at the bottom of this script for a visualization on where the hair comes out etc.
//returns void, changes hairPoints[i].startPoint for every i in [0, hairCount)
//NOTE(review): assumes hairCount is a multiple of 32 (roots are generated 32 at a time) -- confirm callers
void HairPointASetterGPU(int hairCount, float headRadius, hair *hairPoints)
{
	//BUGFIX: the original guard was `sizeof(hairPoints) == 0`; the size of a pointer is
	//never 0, so the check could never fire -- test the pointer itself instead
	if (hairPoints == NULL)
	{
		std::cout << "hairPoints haven't been malloc'd when calling HairPointASetter" << std::endl;
		return;
	}
	float3 v;
	float remainingDist;//distance budget left for the next coordinate so that |v| stays == headRadius
	srand(static_cast <unsigned> (time(0)));//seed random
	for (int i = 0; i < hairCount; i += 32)
	{
		//z can be [-headRadius, headRadius]
		v.z = -headRadius + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (2 * headRadius)));
		float z = v.z;//used for creating 31 similar hairs
		remainingDist = sqrtf(headRadius * headRadius - v.z*v.z);
		if (v.z > 0)//y can't be negative if z is positive ##look at the bottom of the script for explanation
		{
			if (remainingDist == 0) v.y = 0;//division by 0 prevention
			//BUGFIX: the divisor was RAND_MAX / (2 * remainingDist), which samples [0, 2*remainingDist]
			//and relied on the NaN-rejection below to discard the top half; sample [0, remainingDist] directly
			else v.y = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / remainingDist));
		}
		else
		{
			//y can be [-remainingDist, remainingDist]
			if (remainingDist == 0) v.y = 0;//division by 0 prevention
			else v.y = -remainingDist + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (2 * remainingDist)));
		}
		float y = v.y;//used for creating 31 similar hairs
		remainingDist = sqrtf(headRadius * headRadius - v.z*v.z - v.y*v.y);
		//x can be -remainingDist or remainingDist (the root sits exactly on the sphere surface)
		if (remainingDist == 0) v.x = 0;//division by 0 prevention
		else v.x = remainingDist * (static_cast<float>(rand()) > (static_cast<float> (RAND_MAX) / 2.0f) ? 1 : -1);
		float x = v.x;//used for creating 31 similar hairs
		//NOTE(review): this mixes radius^2 with length (not length^2); it is near-vacuous for
		//headRadius > 1 and would reject every sample for headRadius < 1 -- kept as-is, confirm intent
		if (isnan(v.x) || isnan(v.y) || isnan(v.z) || headRadius * headRadius - length(v) < 0)//recalculate
		{
			i -= 32;
			continue;
		}
		//set the group leader's starting point
		hairPoints[i].startPoint = v;
		//we created a hair, now create 31 similar hairs around it
		for (int j = 1; j < 32; j++)
		{
			//BUGFIX: the jitter was z + [0, 0.2f] (biased upward); the stated intent is [-0.1f, 0.1f] around z
			v.z = z - 0.1f + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (0.2f)));
			remainingDist = sqrtf(headRadius * headRadius - v.z*v.z);
			if (v.z > 0)//y can't be negative if z is positive ##look at the bottom of the script for explanation
			{
				if (remainingDist == 0) v.y = 0;//division by 0 prevention
				//BUGFIX: same sampling-range fix as for the group leader above
				else v.y = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / remainingDist));
			}
			else
			{
				//y can be [-remainingDist, remainingDist]
				if (remainingDist == 0) v.y = 0;//division by 0 prevention
				else v.y = -remainingDist + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (2 * remainingDist)));
			}
			if ((y > 0 && v.y < 0) || (y < 0 && v.y > 0)) v.y *= -1;//keep the sibling on the leader's side
			remainingDist = sqrtf(headRadius * headRadius - v.z*v.z - v.y*v.y);
			//x can be -remainingDist or remainingDist
			if (remainingDist == 0) v.x = 0;//division by 0 prevention
			else v.x = remainingDist * (static_cast<float>(rand()) > (static_cast<float> (RAND_MAX) / 2.0f) ? 1 : -1);
			if ((x > 0 && v.x < 0) || (x < 0 && v.x > 0)) v.x *= -1;//keep the sibling on the leader's side
			//NOTE(review): same radius^2-vs-length mix as above -- kept as-is, confirm intent
			if (isnan(v.x) || isnan(v.y) || isnan(v.z) || headRadius * headRadius - length(v) < 0)//recalculate
			{
				j--;
				continue;
			}
			//set the sibling's starting point
			hairPoints[i + j].startPoint = v;
		}
	}
}
//Ramps a wind up to its peak strength, one kernel pair per smoothing step.
//hairPoints: device pointer to the hair strand data
//w: wind descriptor -- w.axis in {'X','Y','Z'}, w.axisSign in {'+','-'}, w.strength scales the bend
//blockSize/blockCount: kernel launch shape; blockSize*blockCount must equal the hair count
//smoothing: number of steps used to reach the peak position
//receives the hair count from the host-side shadow oHairCount_kernel (set @ SaveOriginalSettingsGPU)
//TODO additional feature: can use this routine multiple times without stopping all winds
//returns void; moves/rotates the hair OVER 'TIME' UP TO PEAK POSITION by launching
//StartWindZV2 followed by CollisionDetectionV1 at each step
void ApplyWindV0(hair *hairPoints, wind w, int blockSize, int blockCount, int smoothing)
{
	//reject launch shapes the kernels cannot handle (they have no tid bounds check)
	if (blockSize*blockCount != oHairCount_kernel)
	{
		std::cout << "block size * block count != hair count when calling ApplyWindV0" << std::endl;
		//exit(-1);
		return;
	}
	//a zero-strength wind is a no-op
	if (w.strength == 0)
	{
		return;
	}
	if (hairPoints == NULL)
	{
		std::cout << "hairPoints is not malloc'd when calling ApplyWindV0";
		//exit(-1);
		return;
	}
	if ((w.axis != 'X' && w.axis != 'Y' && w.axis != 'Z') || (w.axisSign != '+' && w.axisSign != '-'))
	{
		std::cout << "wind is not set correctly when calling ApplyWindV0";
		//exit(-1);
		return;
	}
	//ramp the strength linearly so the hair eases into its peak position
	for (int step = 0; step < smoothing; step++)
	{
		float smoothedStrength = w.strength*(step + 1) / smoothing;
		//dispatch on the wind direction (axis already validated above)
		switch (w.axis)
		{
		case 'X':
			std::cout << "X winds are not implemented yet" << std::endl;
			return;
		case 'Y':
			std::cout << "Y winds are not implemented yet" << std::endl;
			return;
		default://'Z'
			StartWindZV2 <<< blockCount, blockSize >>> (hairPoints, w.axisSign, smoothedStrength);
			gpuErrchk(cudaPeekAtLastError());
			CollisionDetectionV1 <<< blockCount, blockSize >>> (hairPoints);
			gpuErrchk(cudaPeekAtLastError());
			break;
		}
	}
}
//look at V0 for more info on function
//trying to unroll loops
__global__ void StartWindZV2(hair *hairPoints, char sign, float strength)
{
//look at applywind()--cpu implementation-- for more info
int tid = blockDim.x*blockIdx.x + threadIdx.x;
//change y,z points, x is always same
//if sign is +, z point gets decreased vice versa
//find effective wind strength--same on all points of the hair
#define effectiveStrengthV2 ((0.75f + (hairPoints[tid].startPoint.z) / (4 * *oHeadRadius_device))*strength)
//TODO possible performance improvement: instead of calculating Zdist&y point seperataly, do sin cos calculations(like rotation)
//calculate nEndPoint
float3 nEndPoint;
nEndPoint.x = hairPoints[tid].startPoint.x;
nEndPoint.y = (hairPoints[tid].startPoint.y - hairPoints[tid].endPoint.y)*effectiveStrengthV2 + hairPoints[tid].endPoint.y;
#define ZdistV2 (sqrtf(*oHairLength_device * *oHairLength_device - (nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y)))
if (sign == '+')
{
nEndPoint.z = hairPoints[tid].startPoint.z - ZdistV2;
}
else//sign == '-'
{
nEndPoint.z = hairPoints[tid].startPoint.z + ZdistV2;
}
float3 * nNonSmoothInterpolatedPoints, *nSmoothInterpolatedPoints;
nNonSmoothInterpolatedPoints = (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
nSmoothInterpolatedPoints = (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
//calculate nNonSmoothInterpolatedPoints for each interpolated point, then set nSmoothInterpolatedPoints
#pragma unroll
for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
{
//smoothPoint calculations
nNonSmoothInterpolatedPoints[i] = (nEndPoint - hairPoints[tid].startPoint)*((float)(i + 1)) / ((float)(hairPoints[tid].interpolatedPointSize + 1)) + hairPoints[tid].startPoint;
float nRad = atan2f(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y, nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z);//the angle(in radians) of nonSmoothInterpolatedPoint[i]-startingPoint from the +Z axis(counterclockwise)
float rad = atan2f(hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y, hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z);//the angle(in radians) of interpolatedPoints[i]-startingPoint from +Z axis(counterclockwise)
float YZdistToStart = sqrtf((nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)*(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)
+ (nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z)*(nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z));
nSmoothInterpolatedPoints[i].x = nNonSmoothInterpolatedPoints[i].x;
nSmoothInterpolatedPoints[i].y = YZdistToStart * sinf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.y;//equally divides the angle between nonSmoothInterpolatedPoints & interpolatedPoints, then sets the angle of smoothPoint[i] as i'th step between nonSmoothInterpolatedPoint& interpolatedPoints
nSmoothInterpolatedPoints[i].z = YZdistToStart * cosf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.z;
}
//move hair points
float3 nVector;//used for finding vector from one point to another
if (hairPoints[tid].interpolatedPointSize>0) hairPoints[tid].interpolatedPoints[0] = nSmoothInterpolatedPoints[0];
#pragma unroll
for (int i = 1; i < hairPoints[tid].interpolatedPointSize; i++)
{
//find NORMALIZED vector from interpoaltedPoints[i-1] to nSmoothInterpolatedPoints[i]
nVector = normalize(nSmoothInterpolatedPoints[i] - hairPoints[tid].interpolatedPoints[i - 1]);
//set interpolatedPoints[i] to interpolatedPoints[i-1] + nVector * hairLength/(intepolatedPointSize+1)
hairPoints[tid].interpolatedPoints[i] = hairPoints[tid].interpolatedPoints[i - 1] + (nVector / (float)(hairPoints[tid].interpolatedPointSize + 1))*sqrtf(
(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
}
//set endPoint
if (hairPoints[tid].interpolatedPointSize > 0)
{
nVector = normalize(nEndPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1]);
hairPoints[tid].endPoint = hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] + nVector / (float)(hairPoints[tid].interpolatedPointSize + 1)*sqrtf(
(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
}
else
{
//no interpolation:=the hair will be a single line
hairPoints[tid].endPoint = nEndPoint;
}
//correct points
float angleInRad = atan2f((hairPoints[tid].endPoint - hairPoints[tid].startPoint).y, (hairPoints[tid].endPoint - hairPoints[tid].startPoint).z);//angle between current endPoint& +Z axis counterclockwise
float nAngleInRad = atan2f((nEndPoint - hairPoints[tid].startPoint).y, (nEndPoint - hairPoints[tid].startPoint).z); //angle between nEndPoint& +Z axis counterclockwise
float offsetAngleInRad = nAngleInRad - angleInRad; //rotate the hair this much counterclockwise
/*
2D rotating of a point around origin counterclockwise :
x' = x cos f - y sin f
y' = y cos f + x sin f
*/
//rotate endPoint & all interpolatedPoints offsetAngle degrees around startingPoint counterclockwise
float3 nPoint;//used for saving point info
nPoint.x = hairPoints[tid].endPoint.x;//x is 'same' on all points of hair, i.e when there is another wind that changes x direction this will POSSIBLY make things wrong
nPoint.y = (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.y;
nPoint.z = (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.z;
hairPoints[tid].endPoint = nPoint;
#pragma unroll
for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
{
nPoint.x = hairPoints[tid].interpolatedPoints[i].x;
nPoint.y = (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.y;
nPoint.z = (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
hairPoints[tid].startPoint.z;
hairPoints[tid].interpolatedPoints[i] = nPoint;
}
//call collision detection @ ApplyWind
//free
free(nNonSmoothInterpolatedPoints);
free(nSmoothInterpolatedPoints);
}
//Look at V0 for more info on this function.
//V1 replaces two float locals with function-like macros, trying to reduce register
//count from 48 to 32 (which would raise occupancy 63% -> 94%).
//FAILED -> limiting the register count in the compiler settings did not decrease it either.
//NOTE(review): there is no `tid < strandCount` bounds check -- presumably the launch
//configuration matches the strand count exactly; confirm against the host launch code.
__global__ void StartWindZV1(hair *hairPoints, char sign, float strength)
{
	//look at applywind() -- the CPU implementation -- for more info
	int tid = blockDim.x*blockIdx.x + threadIdx.x;
	//change y,z points; x is always the same
	//if sign is '+', the z point gets decreased, and vice versa
	//effective wind strength -- same for every point of this hair strand
#define effectiveStrengthV1 ((0.75f + (hairPoints[tid].startPoint.z) / (4 * *oHeadRadius_device))*strength)
	//TODO possible performance improvement: instead of calculating Zdist & the y point separately, do sin/cos calculations (like the rotation below)
	//calculate nEndPoint: where the strand's end point should land once the wind is applied
	float3 nEndPoint;
	nEndPoint.x = hairPoints[tid].startPoint.x;
	nEndPoint.y = (hairPoints[tid].startPoint.y - hairPoints[tid].endPoint.y)*effectiveStrengthV1 + hairPoints[tid].endPoint.y;
	//Z offset that keeps the root->end distance equal to the stored hair length
#define ZdistV1 (sqrtf(*oHairLength_device * *oHairLength_device - (nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y)))
	if (sign == '+')
	{
		nEndPoint.z = hairPoints[tid].startPoint.z - ZdistV1;
	}
	else//sign == '-'
	{
		nEndPoint.z = hairPoints[tid].startPoint.z + ZdistV1;
	}
	//BUGFIX: these function-scoped helper macros leaked into the rest of the
	//translation unit; undefine them right after their last use.
#undef effectiveStrengthV1
#undef ZdistV1
	float3 * nNonSmoothInterpolatedPoints, *nSmoothInterpolatedPoints;
	nNonSmoothInterpolatedPoints = (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
	nSmoothInterpolatedPoints = (float3*)malloc(sizeof(float3)*hairPoints[tid].interpolatedPointSize);
	//ROBUSTNESS: device-side malloc draws from the limited device heap and can fail;
	//bail out instead of writing through NULL. Only checked when an allocation was
	//actually needed (size > 0), because malloc(0) may legitimately return NULL.
	if (hairPoints[tid].interpolatedPointSize > 0 &&
		(nNonSmoothInterpolatedPoints == NULL || nSmoothInterpolatedPoints == NULL))
	{
		if (nNonSmoothInterpolatedPoints != NULL) free(nNonSmoothInterpolatedPoints);
		if (nSmoothInterpolatedPoints != NULL) free(nSmoothInterpolatedPoints);
		return;
	}
	//calculate nNonSmoothInterpolatedPoints for each interpolated point, then set nSmoothInterpolatedPoints
	for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
	{
		//the "non smooth" point sits on the straight line root->nEndPoint
		nNonSmoothInterpolatedPoints[i] = (nEndPoint - hairPoints[tid].startPoint)*((float)(i + 1)) / ((float)(hairPoints[tid].interpolatedPointSize + 1)) + hairPoints[tid].startPoint;
		float nRad = atan2f(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y, nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z);//angle (radians) of nonSmoothInterpolatedPoints[i]-startPoint from the +Z axis (counterclockwise)
		float rad = atan2f(hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y, hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z);//angle (radians) of interpolatedPoints[i]-startPoint from the +Z axis (counterclockwise)
		float YZdistToStart = sqrtf((nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)*(nNonSmoothInterpolatedPoints[i].y - hairPoints[tid].startPoint.y)
			+ (nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z)*(nNonSmoothInterpolatedPoints[i].z - hairPoints[tid].startPoint.z));
		nSmoothInterpolatedPoints[i].x = nNonSmoothInterpolatedPoints[i].x;
		//equally divide the angle between the non-smooth and current points, then place
		//smooth point i at the i'th step between them
		nSmoothInterpolatedPoints[i].y = YZdistToStart * sinf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.y;
		nSmoothInterpolatedPoints[i].z = YZdistToStart * cosf((nRad - rad)*(i + 1) / (hairPoints[tid].interpolatedPointSize + 1) + rad) + hairPoints[tid].startPoint.z;
	}
	//move hair points
	float3 nVector;//used for finding the vector from one point to another
	if (hairPoints[tid].interpolatedPointSize>0) hairPoints[tid].interpolatedPoints[0] = nSmoothInterpolatedPoints[0];
	for (int i = 1; i < hairPoints[tid].interpolatedPointSize; i++)
	{
		//NORMALIZED vector from interpolatedPoints[i-1] to nSmoothInterpolatedPoints[i]
		nVector = normalize(nSmoothInterpolatedPoints[i] - hairPoints[tid].interpolatedPoints[i - 1]);
		//set interpolatedPoints[i] to interpolatedPoints[i-1] + nVector * (YZ chord length)/(interpolatedPointSize+1)
		hairPoints[tid].interpolatedPoints[i] = hairPoints[tid].interpolatedPoints[i - 1] + (nVector / (float)(hairPoints[tid].interpolatedPointSize + 1))*sqrtf(
			(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
	}
	//set endPoint
	if (hairPoints[tid].interpolatedPointSize > 0)
	{
		nVector = normalize(nEndPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1]);
		hairPoints[tid].endPoint = hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] + nVector / (float)(hairPoints[tid].interpolatedPointSize + 1)*sqrtf(
			(nEndPoint.y - hairPoints[tid].startPoint.y)*(nEndPoint.y - hairPoints[tid].startPoint.y) + (nEndPoint.z - hairPoints[tid].startPoint.z)*(nEndPoint.z - hairPoints[tid].startPoint.z));
	}
	else
	{
		//no interpolation := the hair is a single line
		hairPoints[tid].endPoint = nEndPoint;
	}
	//correct points: rotate the whole strand so the end point lands exactly on nEndPoint's YZ angle
	float angleInRad = atan2f((hairPoints[tid].endPoint - hairPoints[tid].startPoint).y, (hairPoints[tid].endPoint - hairPoints[tid].startPoint).z);//angle between the current endPoint & the +Z axis, counterclockwise
	float nAngleInRad = atan2f((nEndPoint - hairPoints[tid].startPoint).y, (nEndPoint - hairPoints[tid].startPoint).z);//angle between nEndPoint & the +Z axis, counterclockwise
	float offsetAngleInRad = nAngleInRad - angleInRad;//rotate the hair this much counterclockwise
	/*
	2D rotation of a point around the origin, counterclockwise:
	x' = x cos f - y sin f
	y' = y cos f + x sin f
	*/
	//rotate endPoint & all interpolatedPoints offsetAngle radians around startPoint, counterclockwise
	float3 nPoint;//scratch for the rotated point
	nPoint.x = hairPoints[tid].endPoint.x;//x is 'same' on all points of the hair; if another wind ever changes the x direction this will POSSIBLY be wrong
	nPoint.y = (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
		hairPoints[tid].startPoint.y;
	nPoint.z = (hairPoints[tid].endPoint.z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].endPoint.y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
		hairPoints[tid].startPoint.z;
	hairPoints[tid].endPoint = nPoint;
	for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
	{
		nPoint.x = hairPoints[tid].interpolatedPoints[i].x;
		nPoint.y = (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*cosf(offsetAngleInRad) + (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*sinf(offsetAngleInRad) +
			hairPoints[tid].startPoint.y;
		nPoint.z = (hairPoints[tid].interpolatedPoints[i].z - hairPoints[tid].startPoint.z)*cosf(offsetAngleInRad) - (hairPoints[tid].interpolatedPoints[i].y - hairPoints[tid].startPoint.y)*sinf(offsetAngleInRad) +
			hairPoints[tid].startPoint.z;
		hairPoints[tid].interpolatedPoints[i] = nPoint;
	}
	//collision detection is invoked afterwards (see ApplyWind)
	free(nNonSmoothInterpolatedPoints);
	free(nSmoothInterpolatedPoints);
}
//Applies a Z-axis wind gust to one hair strand per thread (reference version).
//hairPoints: device array of strands (one thread works on exactly one strand).
//sign: '+' bends the strand towards -Z, '-' towards +Z.
//strength: scales how far the end point is pulled along Y.
//Reads *oHairLength_device and *oHeadRadius_device (saved @ SaveOriginalSettings).
//Only y/z coordinates change; x stays fixed for every point of the strand.
//Other versions: StartWindY, StartWindX; see applywind() (CPU) for the walkthrough.
__global__ void StartWindZV0(hair *hairPoints, char sign, float strength)
{
	const int tid = blockDim.x*blockIdx.x + threadIdx.x;
	hair &strand = hairPoints[tid];
	const float3 root = strand.startPoint;//never written below
	const int segCount = strand.interpolatedPointSize;//never written below
	float3 *pts = strand.interpolatedPoints;
	//wind is stronger the further the root sits towards +Z on the head
	const float effStrength = (0.75f + root.z / (4 * *oHeadRadius_device))*strength;
	//TODO possible performance improvement: derive y and z together via sin/cos
	//instead of computing the Z distance separately (like the rotation below)
	//target end position of the strand once the wind is applied
	float3 target;
	target.x = root.x;
	target.y = (root.y - strand.endPoint.y)*effStrength + strand.endPoint.y;
	//Z offset that keeps the root->end distance equal to the stored hair length
	const float zReach = sqrtf(*oHairLength_device * *oHairLength_device - (target.y - root.y)*(target.y - root.y));
	target.z = (sign == '+') ? (root.z - zReach) : (root.z + zReach);
	float3 *linearPts = (float3*)malloc(sizeof(float3)*segCount);//points on the straight root->target chord
	float3 *smoothPts = (float3*)malloc(sizeof(float3)*segCount);//chord points rotated part-way back towards the current curve
	for (int i = 0; i < segCount; i++)
	{
		linearPts[i] = (target - root)*((float)(i + 1)) / ((float)(segCount + 1)) + root;
		//angles (radians, counterclockwise from +Z) of the chord point and the
		//current point, both measured around the strand root in the YZ plane
		const float chordAngle = atan2f(linearPts[i].y - root.y, linearPts[i].z - root.z);
		const float currAngle = atan2f(pts[i].y - root.y, pts[i].z - root.z);
		const float radial = sqrtf((linearPts[i].y - root.y)*(linearPts[i].y - root.y)
			+ (linearPts[i].z - root.z)*(linearPts[i].z - root.z));
		smoothPts[i].x = linearPts[i].x;
		//step the angle (i+1)/(segCount+1) of the way from the current curve towards the chord
		smoothPts[i].y = radial * sinf((chordAngle - currAngle)*(i + 1) / (segCount + 1) + currAngle) + root.y;
		smoothPts[i].z = radial * cosf((chordAngle - currAngle)*(i + 1) / (segCount + 1) + currAngle) + root.z;
	}
	//re-space the points: each segment gets length (YZ chord length)/(segCount+1)
	const float chordLen = sqrtf((target.y - root.y)*(target.y - root.y) + (target.z - root.z)*(target.z - root.z));
	float3 dir;//normalized direction between consecutive points
	if (segCount > 0) pts[0] = smoothPts[0];
	for (int i = 1; i < segCount; i++)
	{
		dir = normalize(smoothPts[i] - pts[i - 1]);
		pts[i] = pts[i - 1] + (dir / (float)(segCount + 1))*chordLen;
	}
	//place the end point one further segment along, or directly at the target
	//when the strand has no interpolation (a single line)
	if (segCount > 0)
	{
		dir = normalize(target - pts[segCount - 1]);
		strand.endPoint = pts[segCount - 1] + dir / (float)(segCount + 1)*chordLen;
	}
	else
	{
		strand.endPoint = target;
	}
	//correction pass: rotate the whole strand about the root so the end point's
	//YZ angle matches the target's exactly
	const float endAngle = atan2f((strand.endPoint - root).y, (strand.endPoint - root).z);//current endPoint vs +Z, counterclockwise
	const float targetAngle = atan2f((target - root).y, (target - root).z);//target vs +Z, counterclockwise
	const float delta = targetAngle - endAngle;//rotate the hair this much counterclockwise
	/*
	2D rotation of a point around the origin, counterclockwise:
	x' = x cos f - y sin f
	y' = y cos f + x sin f
	*/
	const float cosD = cosf(delta);
	const float sinD = sinf(delta);
	float3 rotated;//scratch for the rotated point
	rotated.x = strand.endPoint.x;//x is 'same' on all points of the hair; a wind that changes x would POSSIBLY break this
	rotated.y = (strand.endPoint.y - root.y)*cosD + (strand.endPoint.z - root.z)*sinD + root.y;
	rotated.z = (strand.endPoint.z - root.z)*cosD - (strand.endPoint.y - root.y)*sinD + root.z;
	strand.endPoint = rotated;
	for (int i = 0; i < segCount; i++)
	{
		rotated.x = pts[i].x;
		rotated.y = (pts[i].y - root.y)*cosD + (pts[i].z - root.z)*sinD + root.y;
		rotated.z = (pts[i].z - root.z)*cosD - (pts[i].y - root.y)*sinD + root.z;
		pts[i] = rotated;
	}
	//collision detection is invoked afterwards (see ApplyWind)
	free(linearPts);
	free(smoothPts);
}
//Unrolled-loop variant of CollisionDetectionV0.
//NOTE(review): `#pragma unroll` has no effect on these loops because their trip
//counts are only known at run time; the pragmas are kept to document the attempt.
//One thread handles one strand: any point (except the root) that ended up inside the
//head is pushed back out, keeping it 0.1f above the head surface in the strand's X
//plane. The x coordinate of every point is left unchanged.
__global__ void CollisionDetectionV1(hair* hairPoints)
{
	int tid = blockDim.x*blockIdx.x + threadIdx.x;
	//minorLength is the distance from O' to the strand root, where O' is the center of
	//the smaller circle of the head lying in the same X plane as this strand
	float3 minorCenter;//center of the minor circle that passes through startPoint
	minorCenter.x = hairPoints[tid].startPoint.x;
	minorCenter.y = 0;
	minorCenter.z = 0;
	float minorLength = length(hairPoints[tid].startPoint - minorCenter);
	//we want the hair points (except the root) to stay 0.1f away from the head: push them 0.1f out from the minor circle
	//check the interpolatedPoints
#pragma unroll
	for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
	{
		//collision detected
		if (length(hairPoints[tid].interpolatedPoints[i] - minorCenter) < minorLength + 0.1f)//remember that we wanted the hairPoints to be at an offset of 0.1f
		{
			//how to:
			//find a newPoint that is rotated out of the head (vertically)
			//move all points (from interpolatedPoints[i+1] to endPoint) so the distance & angle between the j'th & (j-1)'th point stay the same
			//set interpolatedPoints[i] to the new point
			//for more explanations on the angles etc. check the drawings
			float n;//distance between 2 consecutive points on the strand
			float gammaRad;//angle (radians) between the previous point & the +Z axis, counterclockwise
			float alphaPrimeRad;//angle (radians) between the new point & the previous point at O', counterclockwise
			if (i == 0)//the first interpolatedPoint is inside the head
			{
				n = length(hairPoints[tid].interpolatedPoints[0] - hairPoints[tid].startPoint);
				//law of cosines: first edge is minorLength (startPoint to minorCenter),
				//second edge is minorLength+0.1f, third edge is n
				alphaPrimeRad = acosf((minorLength*minorLength + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
					(2 * minorLength*(minorLength + 0.1f)));
				if (isnan(alphaPrimeRad))alphaPrimeRad = PI;//acosf returns NaN outside [-1,1]
				gammaRad = atan2f(hairPoints[tid].startPoint.y, hairPoints[tid].startPoint.z);
			}
			else//a non-first interpolatedPoint is inside the head
			{
				n = length(hairPoints[tid].interpolatedPoints[i] - hairPoints[tid].interpolatedPoints[i - 1]);
				//law of cosines: first edge is prevDistance (previous point to minorCenter,
				//often equal to minorLength+0.1f), second edge is minorLength+0.1f, third edge is n
				float prevDistance = length(hairPoints[tid].interpolatedPoints[i - 1] - minorCenter);
				alphaPrimeRad = acosf((prevDistance*prevDistance + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
					(2 * prevDistance * (minorLength + 0.1f)));
				if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;
				gammaRad = atan2f(hairPoints[tid].interpolatedPoints[i - 1].y, hairPoints[tid].interpolatedPoints[i - 1].z);
			}
			float3 prevPoint = (i > 0 ? hairPoints[tid].interpolatedPoints[i - 1] : hairPoints[tid].startPoint);//the point before this one
			float3 newPoint;
			newPoint.x = hairPoints[tid].interpolatedPoints[i].x;
			if ((prevPoint.y - n)*(prevPoint.y - n) + prevPoint.z*prevPoint.z>(minorLength + 0.1f)*(minorLength + 0.1f))//the hair falls down
			{
				newPoint.y = prevPoint.y - n;
				newPoint.z = prevPoint.z;
			}
			else//the hair follows the curvature of the head
			{
				newPoint.y = (minorLength + 0.1f)*(sinf(alphaPrimeRad + gammaRad));
				newPoint.z = (minorLength + 0.1f)*(cosf(alphaPrimeRad + gammaRad));
			}
			//shift the rest of the points, keeping the distance & angle between consecutive points
			float3 *moveVectors;//deltas between consecutive points on the strand, including endPoint
			moveVectors = (float3*)malloc(sizeof(float3)*(hairPoints[tid].interpolatedPointSize - i));
			//ROBUSTNESS: the device heap is limited; if the allocation fails, leave the
			//strand as-is instead of writing through a null pointer
			if (moveVectors == NULL) return;
#pragma unroll
			for (int j = i + 1; j < hairPoints[tid].interpolatedPointSize; j++)
			{
				moveVectors[j - i - 1] = hairPoints[tid].interpolatedPoints[j] - hairPoints[tid].interpolatedPoints[j - 1];
			}
			moveVectors[hairPoints[tid].interpolatedPointSize - i - 1] = hairPoints[tid].endPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1];//delta between endPoint & the last interpolated point
			//set this point to the new point
			hairPoints[tid].interpolatedPoints[i] = newPoint;
			//add the respective moveVectors to the rest of the interpolatedPoints
#pragma unroll
			for (int j = i + 1; j < hairPoints[tid].interpolatedPointSize; j++) hairPoints[tid].interpolatedPoints[j] = hairPoints[tid].interpolatedPoints[j - 1] + moveVectors[j - i - 1];
			//add the last moveVector to endPoint
			hairPoints[tid].endPoint = hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] + moveVectors[hairPoints[tid].interpolatedPointSize - i - 1];
			free(moveVectors);
		}
	}
	//check the endPoint
	if (length(hairPoints[tid].endPoint - minorCenter) < minorLength + 0.1f)
	{
		float n;//distance between 2 consecutive points on the strand
		float gammaRad;//angle (radians) between the previous point & the +Z axis, counterclockwise
		float alphaPrimeRad;//angle (radians) between the new point & the +Z axis, counterclockwise
		//calculate gammaRad & alphaPrimeRad
		if (hairPoints[tid].interpolatedPointSize > 0)//there is at least 1 interpolatedPoint
		{
			n = length(hairPoints[tid].endPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1]);
			gammaRad = atan2f(hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1].y, hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1].z);
			//law of cosines: first edge is prevDistance, second edge is minorLength+0.1f, third edge is n
			float prevDistance = length(hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] - minorCenter);
			alphaPrimeRad = acosf((prevDistance*prevDistance + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
				(2 * prevDistance * (minorLength + 0.1f)));
			if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf can return nan
		}
		else//no interpolation --> the previous point is the starting point
		{
			n = length(hairPoints[tid].endPoint - hairPoints[tid].startPoint);
			gammaRad = atan2f(hairPoints[tid].startPoint.y, hairPoints[tid].startPoint.z);
			//law of cosines: first edge is minorLength, second edge is minorLength+0.1f, third edge is n
			//BUGFIX: the numerator previously read 2*minorLength*(minorLength+0.1f) - n*n,
			//which understates a^2 + b^2 - n^2 by (b-a)^2 = 0.01f and disagreed with every
			//other acosf() in these kernels; use the full law-of-cosines numerator
			alphaPrimeRad = acosf((minorLength*minorLength + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
				(2 * minorLength*(minorLength + 0.1f)));
			if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf can return nan
		}
		float3 prevPoint = (hairPoints[tid].interpolatedPointSize > 0 ? hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] : hairPoints[tid].startPoint);//the point before the end point
		//rotate endPoint out of the head
		if ((prevPoint.y - n)*(prevPoint.y - n) + prevPoint.z*prevPoint.z>(minorLength + 0.1f)*(minorLength + 0.1f))//hair falls down
		{
			hairPoints[tid].endPoint.y = prevPoint.y - n;
			hairPoints[tid].endPoint.z = prevPoint.z;
		}
		else//hair follows the curvature of the head
		{
			hairPoints[tid].endPoint.y = (minorLength + 0.1f)*(sinf(alphaPrimeRad + gammaRad));
			hairPoints[tid].endPoint.z = (minorLength + 0.1f)*(cosf(alphaPrimeRad + gammaRad));
		}
	}
}
//gets hairPoints; receives the head radius implicitly via each strand's root distance
//if any point on the hair (except the startingPoint) collides with the head, rotates
//the hair out of the head in the +Y direction; does NOT CHANGE the x coordinate
//returns void; moves the hair points that are inside the head back outside it,
//keeping them 0.1f above the head surface in the strand's X plane
__global__ void CollisionDetectionV0(hair* hairPoints)
{
	int tid = blockDim.x*blockIdx.x + threadIdx.x;
	//minorLength is the distance from O' to the strand root, where O' is the center of
	//the smaller circle of the head lying in the same X plane as this strand
	float3 minorCenter;//center of the minor circle that passes through startPoint
	minorCenter.x = hairPoints[tid].startPoint.x;
	minorCenter.y = 0;
	minorCenter.z = 0;
	float minorLength = length(hairPoints[tid].startPoint - minorCenter);
	//we want the hair points (except the root) to stay 0.1f away from the head: push them 0.1f out from the minor circle
	//check the interpolatedPoints
	for (int i = 0; i < hairPoints[tid].interpolatedPointSize; i++)
	{
		//collision detected
		if (length(hairPoints[tid].interpolatedPoints[i] - minorCenter) < minorLength + 0.1f)//remember that we wanted the hairPoints to be at an offset of 0.1f
		{
			//how to:
			//find a newPoint that is rotated out of the head (vertically)
			//move all points (from interpolatedPoints[i+1] to endPoint) so the distance & angle between the j'th & (j-1)'th point stay the same
			//set interpolatedPoints[i] to the new point
			//for more explanations on the angles etc. check the drawings
			float n;//distance between 2 consecutive points on the strand
			float gammaRad;//angle (radians) between the previous point & the +Z axis, counterclockwise
			float alphaPrimeRad;//angle (radians) between the new point & the previous point at O', counterclockwise
			if (i == 0)//the first interpolatedPoint is inside the head
			{
				n = length(hairPoints[tid].interpolatedPoints[0] - hairPoints[tid].startPoint);
				//law of cosines: first edge is minorLength (startPoint to minorCenter),
				//second edge is minorLength+0.1f, third edge is n
				alphaPrimeRad = acosf((minorLength*minorLength + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
					(2 * minorLength*(minorLength + 0.1f)));
				if (isnan(alphaPrimeRad))alphaPrimeRad = PI;//acosf returns NaN outside [-1,1]
				gammaRad = atan2f(hairPoints[tid].startPoint.y, hairPoints[tid].startPoint.z);
			}
			else//a non-first interpolatedPoint is inside the head
			{
				n = length(hairPoints[tid].interpolatedPoints[i] - hairPoints[tid].interpolatedPoints[i - 1]);
				//law of cosines: first edge is prevDistance (previous point to minorCenter,
				//often equal to minorLength+0.1f), second edge is minorLength+0.1f, third edge is n
				float prevDistance = length(hairPoints[tid].interpolatedPoints[i - 1] - minorCenter);
				alphaPrimeRad = acosf((prevDistance*prevDistance + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
					(2 * prevDistance * (minorLength + 0.1f)));
				if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;
				gammaRad = atan2f(hairPoints[tid].interpolatedPoints[i - 1].y, hairPoints[tid].interpolatedPoints[i - 1].z);
			}
			float3 prevPoint = (i > 0 ? hairPoints[tid].interpolatedPoints[i - 1] : hairPoints[tid].startPoint);//the point before this one
			float3 newPoint;
			newPoint.x = hairPoints[tid].interpolatedPoints[i].x;
			if ((prevPoint.y - n)*(prevPoint.y - n) + prevPoint.z*prevPoint.z>(minorLength + 0.1f)*(minorLength + 0.1f))//the hair falls down
			{
				newPoint.y = prevPoint.y - n;
				newPoint.z = prevPoint.z;
			}
			else//the hair follows the curvature of the head
			{
				newPoint.y = (minorLength + 0.1f)*(sinf(alphaPrimeRad + gammaRad));
				newPoint.z = (minorLength + 0.1f)*(cosf(alphaPrimeRad + gammaRad));
			}
			//shift the rest of the points, keeping the distance & angle between consecutive points
			float3 *moveVectors;//deltas between consecutive points on the strand, including endPoint
			moveVectors = (float3*)malloc(sizeof(float3)*(hairPoints[tid].interpolatedPointSize - i));
			//ROBUSTNESS: the device heap is limited; if the allocation fails, leave the
			//strand as-is instead of writing through a null pointer
			if (moveVectors == NULL) return;
			for (int j = i + 1; j < hairPoints[tid].interpolatedPointSize; j++)
			{
				moveVectors[j - i - 1] = hairPoints[tid].interpolatedPoints[j] - hairPoints[tid].interpolatedPoints[j - 1];
			}
			moveVectors[hairPoints[tid].interpolatedPointSize - i - 1] = hairPoints[tid].endPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1];//delta between endPoint & the last interpolated point
			//set this point to the new point
			hairPoints[tid].interpolatedPoints[i] = newPoint;
			//add the respective moveVectors to the rest of the interpolatedPoints
			for (int j = i + 1; j < hairPoints[tid].interpolatedPointSize; j++) hairPoints[tid].interpolatedPoints[j] = hairPoints[tid].interpolatedPoints[j - 1] + moveVectors[j - i - 1];
			//add the last moveVector to endPoint
			hairPoints[tid].endPoint = hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] + moveVectors[hairPoints[tid].interpolatedPointSize - i - 1];
			free(moveVectors);
		}
	}
	//check the endPoint
	if (length(hairPoints[tid].endPoint - minorCenter) < minorLength + 0.1f)
	{
		float n;//distance between 2 consecutive points on the strand
		float gammaRad;//angle (radians) between the previous point & the +Z axis, counterclockwise
		float alphaPrimeRad;//angle (radians) between the new point & the +Z axis, counterclockwise
		//calculate gammaRad & alphaPrimeRad
		if (hairPoints[tid].interpolatedPointSize > 0)//there is at least 1 interpolatedPoint
		{
			n = length(hairPoints[tid].endPoint - hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1]);
			gammaRad = atan2f(hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1].y, hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1].z);
			//law of cosines: first edge is prevDistance, second edge is minorLength+0.1f, third edge is n
			float prevDistance = length(hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] - minorCenter);
			alphaPrimeRad = acosf((prevDistance*prevDistance + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
				(2 * prevDistance * (minorLength + 0.1f)));
			if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf can return nan
		}
		else//no interpolation --> the previous point is the starting point
		{
			n = length(hairPoints[tid].endPoint - hairPoints[tid].startPoint);
			gammaRad = atan2f(hairPoints[tid].startPoint.y, hairPoints[tid].startPoint.z);
			//law of cosines: first edge is minorLength, second edge is minorLength+0.1f, third edge is n
			//BUGFIX: the numerator previously read 2*minorLength*(minorLength+0.1f) - n*n,
			//which understates a^2 + b^2 - n^2 by (b-a)^2 = 0.01f and disagreed with every
			//other acosf() in these kernels; use the full law-of-cosines numerator
			alphaPrimeRad = acosf((minorLength*minorLength + (minorLength + 0.1f)*(minorLength + 0.1f) - n * n) /
				(2 * minorLength*(minorLength + 0.1f)));
			if (isnan(alphaPrimeRad)) alphaPrimeRad = PI;//acosf can return nan
		}
		float3 prevPoint = (hairPoints[tid].interpolatedPointSize > 0 ? hairPoints[tid].interpolatedPoints[hairPoints[tid].interpolatedPointSize - 1] : hairPoints[tid].startPoint);//the point before the end point
		//rotate endPoint out of the head
		if ((prevPoint.y - n)*(prevPoint.y - n) + prevPoint.z*prevPoint.z>(minorLength + 0.1f)*(minorLength + 0.1f))//hair falls down
		{
			hairPoints[tid].endPoint.y = prevPoint.y - n;
			hairPoints[tid].endPoint.z = prevPoint.z;
		}
		else//hair follows the curvature of the head
		{
			hairPoints[tid].endPoint.y = (minorLength + 0.1f)*(sinf(alphaPrimeRad + gammaRad));
			hairPoints[tid].endPoint.z = (minorLength + 0.1f)*(cosf(alphaPrimeRad + gammaRad));
		}
	}
}
//frees hairPoints_device
// Releases the device-side array of hair strands. The per-strand
// interpolatedPoints buffers are deliberately NOT freed here: per the
// retained note below, they alias host-side h_data memory that is
// released elsewhere.
void FreeAllGPU(hair *hairPoints)
{
//for (int i = 0; i < oHairCount_kernel; i++) free((hairPoints[i].interpolatedPoints));// interpolated points are already free'd with h_data.interpoalted point since they point to the same point on memory
gpuErrchk(cudaFree(hairPoints));
}
|
e766c833dd29d1bd40128f4d54ec611c513b8023.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/indexed_threshold_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_threshold_positive(const int num, const int dim,
    const float threshold, const float scale,
    const Dtype* index_data, const Dtype* bottom_data,
    Dtype* diff_data, Dtype* weight_data) {
  // For each of the `num` samples, look only at the channel selected by
  // index_data[i] and apply a one-sided penalty when its value is below
  // `threshold`; otherwise the indexed residual is cleared.
  CUDA_KERNEL_LOOP(i, num) {
    const int offset = dim * i + static_cast<int>(index_data[i]);
    const Dtype value = bottom_data[offset];
    if (value < threshold) {
      // Below threshold: scaled residual plus matching weight entry.
      diff_data[offset] = scale * (value - threshold);
      weight_data[offset] = scale;
    } else {
      // At or above threshold: no penalty for the indexed channel.
      diff_data[offset] = 0;
    }
  }
}
template <typename Dtype>
void IndexedThresholdLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Computes an L2-style loss over diff_ = sqrt(null_scale_) * bottom[0],
// with the per-sample channel selected by bottom[1] (the index blob)
// replaced by the one-sided threshold residual (kernel_threshold_positive).
int count = bottom[0]->count();
auto index_data = bottom[1]->gpu_data();  // per-sample channel index
auto bottom_data = bottom[0]->gpu_data();  // predictions
auto diff_data = diff_.mutable_gpu_data();  // residuals; reused by Backward_gpu
auto weights_data = weights_.mutable_gpu_data();
auto dim = bottom[0]->count(index_axis_ + 1);  // elements per sample after the index axis
const Dtype alpha = sqrt(null_scale_);
// Initialise every weight with the "null" scale; the kernel overwrites
// the indexed entries with sqrt(positive_scale_).
caffe_gpu_set(count, alpha, weights_data);
if (null_scale_ == 1) {
caffe_copy(count, bottom_data, diff_data);
} else {
// NOTE(review): caffe_gpu_axpy accumulates (diff += alpha * bottom); diff_
// is not cleared first, and Backward_gpu writes into diff_ in place, so a
// previous iteration's values may leak into this pass -- confirm diff_ is
// guaranteed zeroed here.
caffe_gpu_axpy(
count, // count
alpha, // alpha
bottom_data, // a
diff_data); // b
}
// One thread per sample overwrites the indexed channel's residual/weight.
// NOTE(review): no CUDA_POST_KERNEL_CHECK after this launch, unlike the
// usual Caffe GPU-layer convention.
kernel_threshold_positive<Dtype> << <CAFFE_GET_BLOCKS(outer_num_),
CAFFE_CUDA_NUM_THREADS >> >(outer_num_, dim,
threshold_, sqrt(positive_scale_),
index_data, bottom_data,
diff_data, weights_data);
// loss = ||diff||^2 / (2 * num)
Dtype dot;
caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
Dtype loss = dot / bottom[0]->num() / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void IndexedThresholdLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// Gradient w.r.t. the predictions: weights_ .* diff_, scaled by the top
// gradient over the batch size. The index input is not differentiable.
if (propagate_down[1]) {
LOG(FATAL) << type()
<< " Layer cannot backpropagate to index inputs.";
}
if (!propagate_down[0])
return;
int count = bottom[0]->count();
// In place: diff_ <- weights_ .* diff_ (diff_ was filled by Forward_gpu).
caffe_gpu_mul(count, weights_.gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data());
// Scale gradient by the loss weight propagated from the top blob.
const Dtype alpha = top[0]->cpu_diff()[0] / bottom[0]->num();
// NOTE(review): caffe_gpu_axpy accumulates into bottom's diff rather than
// overwriting it -- confirm the solver zeroes diffs between iterations.
caffe_gpu_axpy(
count, // count
alpha, // alpha
diff_.gpu_data(), // a
bottom[0]->mutable_gpu_diff()); // b
}
INSTANTIATE_LAYER_GPU_FUNCS(IndexedThresholdLossLayer);
} // namespace caffe
| e766c833dd29d1bd40128f4d54ec611c513b8023.cu | #include <vector>
#include "caffe/layers/indexed_threshold_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_threshold_positive(const int num, const int dim,
    const float threshold, const float scale,
    const Dtype* index_data, const Dtype* bottom_data,
    Dtype* diff_data, Dtype* weight_data) {
  // For each of the `num` samples, look only at the channel selected by
  // index_data[i] and apply a one-sided penalty when its value is below
  // `threshold`; otherwise the indexed residual is cleared.
  CUDA_KERNEL_LOOP(i, num) {
    const int offset = dim * i + static_cast<int>(index_data[i]);
    const Dtype value = bottom_data[offset];
    if (value < threshold) {
      // Below threshold: scaled residual plus matching weight entry.
      diff_data[offset] = scale * (value - threshold);
      weight_data[offset] = scale;
    } else {
      // At or above threshold: no penalty for the indexed channel.
      diff_data[offset] = 0;
    }
  }
}
template <typename Dtype>
void IndexedThresholdLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Computes an L2-style loss over diff_ = sqrt(null_scale_) * bottom[0],
// with the per-sample channel selected by bottom[1] (the index blob)
// replaced by the one-sided threshold residual (kernel_threshold_positive).
int count = bottom[0]->count();
auto index_data = bottom[1]->gpu_data();  // per-sample channel index
auto bottom_data = bottom[0]->gpu_data();  // predictions
auto diff_data = diff_.mutable_gpu_data();  // residuals; reused by Backward_gpu
auto weights_data = weights_.mutable_gpu_data();
auto dim = bottom[0]->count(index_axis_ + 1);  // elements per sample after the index axis
const Dtype alpha = sqrt(null_scale_);
// Initialise every weight with the "null" scale; the kernel overwrites
// the indexed entries with sqrt(positive_scale_).
caffe_gpu_set(count, alpha, weights_data);
if (null_scale_ == 1) {
caffe_copy(count, bottom_data, diff_data);
} else {
// NOTE(review): caffe_gpu_axpy accumulates (diff += alpha * bottom); diff_
// is not cleared first, and Backward_gpu writes into diff_ in place, so a
// previous iteration's values may leak into this pass -- confirm diff_ is
// guaranteed zeroed here.
caffe_gpu_axpy(
count, // count
alpha, // alpha
bottom_data, // a
diff_data); // b
}
// One thread per sample overwrites the indexed channel's residual/weight.
// NOTE(review): no CUDA_POST_KERNEL_CHECK after this launch, unlike the
// usual Caffe GPU-layer convention.
kernel_threshold_positive<Dtype> << <CAFFE_GET_BLOCKS(outer_num_),
CAFFE_CUDA_NUM_THREADS >> >(outer_num_, dim,
threshold_, sqrt(positive_scale_),
index_data, bottom_data,
diff_data, weights_data);
// loss = ||diff||^2 / (2 * num)
Dtype dot;
caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
Dtype loss = dot / bottom[0]->num() / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void IndexedThresholdLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// Gradient w.r.t. the predictions: weights_ .* diff_, scaled by the top
// gradient over the batch size. The index input is not differentiable.
if (propagate_down[1]) {
LOG(FATAL) << type()
<< " Layer cannot backpropagate to index inputs.";
}
if (!propagate_down[0])
return;
int count = bottom[0]->count();
// In place: diff_ <- weights_ .* diff_ (diff_ was filled by Forward_gpu).
caffe_gpu_mul(count, weights_.gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data());
// Scale gradient by the loss weight propagated from the top blob.
const Dtype alpha = top[0]->cpu_diff()[0] / bottom[0]->num();
// NOTE(review): caffe_gpu_axpy accumulates into bottom's diff rather than
// overwriting it -- confirm the solver zeroes diffs between iterations.
caffe_gpu_axpy(
count, // count
alpha, // alpha
diff_.gpu_data(), // a
bottom[0]->mutable_gpu_diff()); // b
}
INSTANTIATE_LAYER_GPU_FUNCS(IndexedThresholdLossLayer);
} // namespace caffe
|
129f55e251e7b3dcd1cdd2da698f80638b33e08e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <conio.h>
#include <time.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
#include <chrono>
#include <thread>
#include <Windows.h>
using namespace std;
#define OBJETIVO 2048
#define DESP_POS 1
#define DESP_NEG -1
#define NO_DESP 0
#ifdef __INTELLISENSE__
void __syncthreads(); // Para evitar el error que da el intellisense con syncthreads y atomicadd
void atomicAdd(int *a, int b);
#endif
// Variables globales para recoger por parmetros
struct dimensionesMatriz {
int numFilas;
int numColumnas;
} dimMatriz;
dim3 dimGrid; // Grid de bloques
dim3 dimBlock; // Hilos por bloque
// Variables de control
// Juego automtico o manual
bool automatico;
// N de bytes que ocupa la matriz
int bytesMatriz;
// Dificultad del juego
bool modoDiablo;
// Control de vidas:
int vidas;
// Funciones de juego
__host__ void juegoAutomatico(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida);
__host__ void juegoManual(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida);
// Funciones auxiliares en Device
__device__ int getElemento(int *matriz, int fila, int columna, dimensionesMatriz* d_dimMatriz);
__device__ void setElemento(int *matriz, int fila, int columna, int elemento, dimensionesMatriz* d_dimMatriz);
// Kernels
// Kernel movimiento
__global__ void kernelDesplazar(int *h_matrizEntrada, int *h_matrizSalida, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz);
// Kernels auxiliares
__global__ void kernelSuma(int *h_matrizEntrada, int *h_matrizSalida, int *d_puntuacion, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz);
__global__ void kernelCopiarMatriz(int *d_matrizCopia, int *d_matrizPega, dimensionesMatriz* d_dimMatriz);
__global__ void kernelSetMatrizCeros(int *matriz, dimensionesMatriz* d_dimMatriz);
__global__ void kernelSumaYDesplazaDerecha(int *d_matrizEntrada, int *d_matrizSalida, int *d_puntuacion, dimensionesMatriz* d_dimMatriz);
// Funciones auxiliares de comprobacin de estado de la matriz
__global__ void kernelComprobarIguales(int *d_matrizUno, int *d_matrizDos, bool* d_sonIguales, dimensionesMatriz* d_dimMatriz);
__global__ void kernelComprobarLlena(int *d_matrizUno, bool* d_estaLlena, dimensionesMatriz* d_dimMatriz);
__global__ void kernelComprobarMovimientosPosibles(int *d_matriz, bool *seguirJugando, dimensionesMatriz* d_dimMatriz);
__global__ void kernelComprobarSiHaGanado(int *d_matriz, bool* d_haGanado, dimensionesMatriz* d_dimMatriz);
// Funciones auxiliares en Host
__host__ void caracteristicasTarjeta();
__host__ void leerParametros(int argc, const char* argv[]);
// Operaciones con matrices
__host__ void inicializarMatriz(int *h_matriz);
__host__ void rellenarMatrizconcero(int *h_matriz);
__host__ void pintarMatriz(int *h_matriz);
__host__ void copiarMatriz(int *h_matrizCopia, int *h_matrizPega);
__host__ void setElementoHost(int *h_matriz, int fila, int columna, int elemento);
__host__ void nuevaSemilla(int *h_matriz, int numSemillas);
// Comprobadores
__host__ bool estaLlena(int* d_matriz);
__host__ bool finJuego(int* d_matriz);
__host__ bool movimientosPosibles(int* d_matriz);
// Funciones de host de carga y guardado de matrices:
__host__ void escribirMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos);
__host__ bool leerMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos);
// Funcion de movimiento en Host
__host__ bool desplazarMatriz(int *h_matrizEntrada, int *h_matrizSalida, int *d_matrizEntrada, int *d_matrizSalida, int *h_puntuacion, int despVertical, int despHorizontal);
// MAIN
// Program entry point: parses the command line, allocates host/device
// boards and dispatches to the automatic or manual game mode.
// Fixes vs. original: the RNG is now seeded (juegoAutomatico and the board
// seeding rely on rand(), so every run replayed the same game), host
// allocations are checked, and host buffers are freed on exit.
int main(int argc, const char* argv[])
{
	leerParametros(argc, argv);
	// Seed the pseudo-random generator once per run.
	srand((unsigned)time(NULL));
	// Host-side boards (current state and move result).
	int* h_matriz = (int *)malloc(bytesMatriz);
	int* h_matrizResultado = (int *)malloc(bytesMatriz);
	if (h_matriz == NULL || h_matrizResultado == NULL)
	{
		cout << "Error reservando memoria en host." << endl;
		return 1;
	}
	// Device-side boards.
	int *d_matrizEntrada;
	int *d_matrizSalida;
	hipMalloc((void **)&d_matrizEntrada, bytesMatriz);
	hipMalloc((void **)&d_matrizSalida, bytesMatriz);
	// Start both host boards zero-filled.
	rellenarMatrizconcero(h_matriz);
	rellenarMatrizconcero(h_matrizResultado);
	if (automatico)
		juegoAutomatico(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida);
	else
		juegoManual(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida);
	// Release device and host memory.
	hipFree(d_matrizEntrada);
	hipFree(d_matrizSalida);
	free(h_matriz);
	free(h_matrizResultado);
	return 0;
}
// ----------- MODOS DE JUEGO ----------- //
// Automatic game mode: the machine plays random moves until it reaches the
// goal tile or runs out of lives.
// Fixes vs. original:
//  - when the board got stuck with exactly one life left, neither branch
//    handled it, so `vidas` never reached 0 and the loop spun forever;
//    lives are now decremented on every dead board and the loop ends at 0.
//  - on retry the board was only zeroed, never re-seeded (juegoManual calls
//    inicializarMatriz on retry); now the automatic mode does too.
__host__ void juegoAutomatico(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida)
{
	cout << "+--------------------------------------------------------+" << endl;
	cout << "| Bienvenido al 16384, se ha elegido el modo automatico. |" << endl;
	cout << "+--------------------------------------------------------+" << endl;
	inicializarMatriz(h_matriz);
	// Report device properties before starting.
	cout << "+--------------------------------------------------------+" << endl;
	caracteristicasTarjeta();
	cout << "+--------------------------------------------------------+" << endl;
	cout << endl;
	system("pause");
	system("cls");
	int movimientos = 0;	// moves played in the current attempt
	int puntuacion = 0;	// accumulated score
	vidas = 5;
	bool seguirJugando = false;	// board still has legal moves
	bool ganado = false;	// goal tile reached
	while (!ganado && vidas > 0)
	{
		// Pick one of the four directions at random.
		int movimiento = rand() % 4;
		system("CLS");
		// desplazarMatriz args: in/out host matrices, device buffers,
		// score accumulator, vertical step, horizontal step.
		switch (movimiento)
		{
		case 0:
			cout << "Muevo arriba " << endl;
			seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_NEG, NO_DESP);
			break;
		case 1:
			cout << "Muevo abajo " << endl;
			seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_POS, NO_DESP);
			break;
		case 2:
			cout << "Muevo izquierda " << endl;
			seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_NEG);
			break;
		case 3:
			cout << "Muevo derecha " << endl;
			seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_POS);
			break;
		}
		// Every case used to evaluate this after the move; hoisted here.
		ganado = finJuego(d_matrizSalida);
		movimientos++;
		copiarMatriz(h_matrizResultado, h_matriz);
		cout << "+------------------------------------------------------------+" << endl;
		printf("|Movimiento: %d\tPuntuacion: %d\t Vidas: %d \n", movimientos, puntuacion, vidas);
		cout << "+------------------------------------------------------------+" << endl;
		pintarMatriz(h_matriz);
		if (ganado)
		{
			cout << endl << "LA MAQUINA HA GANADO VIVA TURING " << endl;
			exit(0);
		}
		if (!seguirJugando)
		{
			cout << "+---------------------------------------------------------------------------------------------+" << endl;
			cout << "| No hay mas movimientos posibles, la maquina ha perdido. Hemos suspendido el test de Turing. |" << endl;
			cout << "+---------------------------------------------------------------------------------------------+" << endl;
			// A dead board always costs a life, including the last one.
			vidas -= 1;
			if (vidas == 0)
				break;	// out of lives -> final message below
			cout << endl;
			cout << "+---------------------------------------------------------------------------------------------+" << endl;
			cout << "| Lo intentamos de nuevo (si/no)?. |" << endl;
			cout << "+---------------------------------------------------------------------------------------------+" << endl;
			string otraVez;
			cin >> otraVez;
			if (otraVez == "no")
			{
				cout << "Hasta la vista, Baby. " << endl;
				exit(0);
			}
			// Any other answer restarts the attempt with a fresh board.
			rellenarMatrizconcero(h_matriz);
			rellenarMatrizconcero(h_matrizResultado);
			inicializarMatriz(h_matriz);
			movimientos = 0;
			seguirJugando = true;
		}
		// Sleep chungo de C++. Cambiar el 100 por lo que se quiera
		//this_thread::sleep_for(chrono::milliseconds(100));
		// Si se quiere avanzar con enters descomentar esto:
		//system("PAUSE");
	}
	cout << "A la maquina no le quedan vidas. Fin de juego. Adios Terminator. " << endl;
	exit(0);
}
// Interactive (manual) game mode: reads arrow-key input from the console,
// applies moves on the GPU, tracks lives/score and supports save/load.
// Fixes vs. original:
//  - both retry prompts used broken boolean conditions: the first never let
//    "no" exit the loop, the second was a tautology (infinite loop). Both
//    now accept exactly "si" or "no".
//  - leerMatriz/escribirMatriz were called with &movimientos and
//    &puntuacion swapped relative to the declared (puntuacion, movimientos)
//    parameter order; calls now match the declarations.
__host__ void juegoManual(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida)
{
	// Welcome banner.
	cout << "+----------------------------------------------------+" << endl;
	cout << "|Hola amigo bienvenido al 16384 que ganitas de jugar |" << endl;
	cout << "+----------------------------------------------------+" << endl;
	cout << endl;
	// Report device properties.
	cout << "+----------------------------------------------------+" << endl;
	caracteristicasTarjeta();
	cout << "+----------------------------------------------------+" << endl;
	// Game state.
	int movimientos = 0;	// moves played this attempt
	int puntuacion = 0;	// total score
	vidas = 5;	// lives left
	char entrada1, entrada2;	// raw console key codes
	bool correcto = false;	// last keypress was a valid move
	bool puedeSeguirJugando = false;	// moves still available
	bool ganado = false;	// goal tile reached
	bool haGanadoYQuiereSeguir = false;	// already won, playing on for score
	// Player name.
	string nombre;
	cout << "+----------------------------------------------------+" << endl;
	cout << "| Dame tu nombre amiguete: |" << endl;
	cout << "+----------------------------------------------------+" << endl;
	cin >> nombre;
	cout << endl;
	// Optional game load.
	cout << "+----------------------------------------------------+" << endl;
	cout << "| Quieres cargar tu partida? |" << endl;
	cout << "+----------------------------------------------------+" << endl;
	string cargar;
	cin >> cargar;
	// FIX: arguments follow the declared order (puntuacion, movimientos).
	if (cargar == "si" && leerMatriz(h_matriz, nombre, &puntuacion, &movimientos))
	{
		cout << "+----------------------------------------------------+" << endl;
		cout << "| Partida cargada. |" << endl;
		cout << "+----------------------------------------------------+" << endl;
	}
	// Otherwise start from a freshly seeded board.
	else
	{
		inicializarMatriz(h_matriz);
	}
	// Main loop: one keypress per iteration; 's' quits to the save prompt.
	while (true)
	{
		// Redraw board and statistics.
		system("CLS");
		cout << "+------------------------------------------------------------+" << endl;
		printf("|Movimiento: %d\tPuntuacion: %d\t Vidas: %d \n", movimientos, puntuacion, vidas);
		cout << "+------------------------------------------------------------+" << endl;
		pintarMatriz(h_matriz);
		correcto = true;
		// Arrow keys produce two characters; the second identifies the key.
		entrada1 = getch();
		if (entrada1 == 's')
			break;
		else
		{
			entrada2 = getch();
			// desplazarMatriz args: host matrices, device buffers, score,
			// vertical step, horizontal step.
			switch (entrada2)
			{
			case 72:	// up arrow
				puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_NEG, NO_DESP);
				ganado = finJuego(d_matrizSalida);
				break;
			case 80:	// down arrow
				puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_POS, NO_DESP);
				ganado = finJuego(d_matrizSalida);
				break;
			case 75:	// left arrow
				puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_NEG);
				ganado = finJuego(d_matrizSalida);
				break;
			case 77:	// right arrow
				puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_POS);
				ganado = finJuego(d_matrizSalida);
				break;
			default:
				cout << "Caracter incorrecto. " << endl;
				correcto = false;
			}
		}
		// After a valid move, evaluate the new board state.
		if (correcto)
		{
			copiarMatriz(h_matrizResultado, h_matriz);
			movimientos++;
			// Stuck with lives to spare (and not in post-win free play).
			if (!puedeSeguirJugando && vidas > 1 && !haGanadoYQuiereSeguir)
			{
				vidas -= 1;
				cout << "+---------------------------------------------------------------------------+" << endl;
				cout << "| No hay mas movimientos posibles, fin de juego. Intentalo de nuevo. |" << endl;
				cout << "| Te quedan: " << vidas << " vidas. | " << endl;
				cout << "+---------------------------------------------------------------------------+" << endl;
				string otraVez;
				// FIX: accept exactly "si" or "no".
				do
				{
					cout << "+---------------------------------------------------------------------------+" << endl;
					cout << "| Quieres intentarlo de nuevo (si/no)? |" << endl;
					cout << "+---------------------------------------------------------------------------+" << endl;
					cin >> otraVez;
				} while (otraVez != "si" && otraVez != "no");
				if (otraVez == "no")
				{
					cout << "Nos vemos amigo. " << endl;
					exit(0);
				}
				// Reset the attempt.
				else
				{
					rellenarMatrizconcero(h_matriz);
					rellenarMatrizconcero(h_matrizResultado);
					movimientos = 0;
					ganado = false;
					haGanadoYQuiereSeguir = false;
					inicializarMatriz(h_matriz);
				}
			}
			// Stuck on the last life: game over.
			else if (!puedeSeguirJugando && vidas == 1 && !haGanadoYQuiereSeguir)
			{
				vidas -= 1;
				cout << endl << "No hay mas movimientos posibles, fin del juego." << endl;
				cout << endl << "Adems no te quedan vidas." << endl;
				cout << "Esta es tu puntuacion final: " << puntuacion << endl;
				exit(0);
			}
			// Stuck during post-win free play: lives are not consumed.
			else if (!puedeSeguirJugando && haGanadoYQuiereSeguir)
			{
				cout << "+---------------------------------------------------------------------------+" << endl;
				cout << endl << "| No hay mas movimientos posibles, fin de juego. Intentalo de nuevo." << endl;
				cout << endl << "| Te quedan: " << vidas << " vidas. " << endl;
				cout << "+----------------------------------------------------------------------------+" << endl;
				string otraVez;
				// FIX: the original condition was a tautology (infinite loop).
				do
				{
					cout << "+---------------------------------------------------------------------------------------------+" << endl;
					cout << "| Quieres intentarlo de nuevo (si/no)? |" << endl;
					cout << "+---------------------------------------------------------------------------------------------+" << endl;
					cin >> otraVez;
				} while (otraVez != "si" && otraVez != "no");
				if (otraVez == "no")
				{
					cout << "Nos vemos amigo. " << endl;
					exit(0);
				}
				// Reset the attempt.
				else
				{
					rellenarMatrizconcero(h_matriz);
					rellenarMatrizconcero(h_matrizResultado);
					movimientos = 0;
					ganado = false;
					haGanadoYQuiereSeguir = false;
					inicializarMatriz(h_matriz);
				}
			}
			// Just reached the goal tile: offer to keep playing for score.
			else if (ganado && !haGanadoYQuiereSeguir)
			{
				cout << "+---------------------------------------------------------------------------+" << endl;
				cout << "| Felicidades campeon, has ganado. Esta es tu puntuacion final: " << puntuacion << endl;
				cout << "+---------------------------------------------------------------------------+" << endl;
				string jugarMas;
				while (!(jugarMas == "si") && !(jugarMas == "no"))
				{
					cout << endl << "Quieres seguir jugando?" << endl;
					cin >> jugarMas;
				}
				if (jugarMas == "no")
				{
					cout << "Hasta luego!" << endl;
					exit(0);
				}
				else
				{
					haGanadoYQuiereSeguir = true;
				}
			}
		}
	}
	// Offer to save before leaving.
	cout << "Quieres guardar partida? " << endl;
	string entrada;
	cin >> entrada;
	if (entrada == "si")
	{
		// FIX: declared order is (puntuacion, movimientos).
		escribirMatriz(h_matriz, nombre, &puntuacion, &movimientos);
		cout << "Matriz guardada con nombre: " + nombre << endl;
	}
}
// ----------- FUNCIONES DEVICE ----------- //
__device__ int getElemento(int *d_matriz, int fila, int columna, dimensionesMatriz* d_dimMatriz)
/*
Row-major lookup: returns the value stored at [fila][columna].
*/
{
	const int indice = fila * d_dimMatriz->numColumnas + columna;
	return d_matriz[indice];
}
__device__ void setElemento(int *d_matriz, int fila, int columna, int elemento, dimensionesMatriz* d_dimMatriz)
/*
Row-major store: writes `elemento` at [fila][columna].
*/
{
	const int indice = fila * d_dimMatriz->numColumnas + columna;
	d_matriz[indice] = elemento;
}
// --------- KERNELS PRINCIPALES ----------- //
__global__ void kernelCopiarMatriz(int *d_matrizCopia, int *d_matrizPega, dimensionesMatriz* d_dimMatriz)
/*
One thread per cell: reads a cell of the source matrix and writes it to
the destination matrix at the same [row][column] position.
*/
{
	// Global 2D coordinates of this thread's cell.
	int fila = blockIdx.y * blockDim.y + threadIdx.y;
	int columna = blockIdx.x * blockDim.x + threadIdx.x;
	// Transfer the value in a single read/write pair.
	setElemento(d_matrizPega, fila, columna,
		getElemento(d_matrizCopia, fila, columna, d_dimMatriz), d_dimMatriz);
}
__global__ void kernelSuma(int *d_matrizEntrada, int *d_matrizSalida, int *d_puntuacion, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz)
/*
Merge pass of a move: given the input matrix and the move direction
(despVertical/despHorizontal in {-1, 0, 1}), writes the result of the
pairwise merges into the output matrix and accumulates the score gained
into *d_puntuacion via atomicAdd. One thread per cell.
NOTE(review): the __syncthreads() calls below sit inside a divergent
branch (only threads holding a non-zero, non-edge element reach them);
per the CUDA programming guide this is undefined behaviour when other
threads of the block take the else path -- confirm/restructure.
*/
{
// Global 2D position of this thread's cell.
int fila = blockIdx.y * blockDim.y + threadIdx.y;
int columna = blockIdx.x * blockDim.x + threadIdx.x;
// Scan state: index of the far edge in the move direction, the scan
// cursor, and this cell's coordinate along the move axis.
int ultimaPosicion, desplazamiento, posicionActual;
bool esVertical;
// Configure the scan according to the move direction.
if (*despVertical != 0)
{
// Vertical move.
posicionActual = fila;
desplazamiento = fila;
esVertical = true;
if (*despVertical == -1)
ultimaPosicion = 0;
else
ultimaPosicion = d_dimMatriz->numFilas - 1;
}
else
{
// Horizontal move.
posicionActual = columna;
desplazamiento = columna;
esVertical = false;
if (*despHorizontal == -1)
ultimaPosicion = 0;
else
ultimaPosicion = d_dimMatriz->numColumnas - 1;
}
// Value held by this cell.
int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz);
// Whether this cell's value gets doubled into its partner position.
bool multiplicarem = false;
// Only non-zero cells that are not already at the far edge can merge.
if (elemento != 0 && posicionActual != ultimaPosicion)
{
// Count equal values ahead in the move direction; parity decides
// which cell of a pair doubles.
int paridad = 1;
// Cell inspected inside the loop.
int casilla;
// Walk towards the edge until a different non-zero value appears.
do {
// Cell currently under the cursor.
if (esVertical)
casilla = getElemento(d_matrizEntrada, desplazamiento + *despVertical, columna, d_dimMatriz);
else
casilla = getElemento(d_matrizEntrada, fila, desplazamiento + *despHorizontal, d_dimMatriz);
// A different non-zero value blocks the merge scan.
if (casilla != elemento && casilla != 0) { break; }
// Another equal value ahead: flip parity.
if (casilla == elemento) { paridad += 1; }
// Advance one step in the move direction.
desplazamiento += *despHorizontal + *despVertical;
} while (desplazamiento != ultimaPosicion);
// Even parity -> this cell is the second half of a pair.
if (paridad % 2 == 0)
{
multiplicarem = true;
}
// Wait for every thread to finish scanning the input.
__syncthreads();
// Threads on the "even" half write the doubled value.
if (multiplicarem)
{
// Locate the partner cell whose value is doubled.
int casilla;
desplazamiento = posicionActual; // reset the scan cursor
// Skip zeros until the partner (equal value) is found.
do {
desplazamiento += *despHorizontal + *despVertical;
if (esVertical)
casilla = getElemento(d_matrizEntrada, desplazamiento, columna, d_dimMatriz);
else
casilla = getElemento(d_matrizEntrada, fila, desplazamiento, d_dimMatriz);
} while (casilla != elemento);
// Accumulate this merge's score atomically (threads race on the total).
atomicAdd(d_puntuacion, elemento * 2);
// Write the doubled value at the partner position.
if (esVertical)
setElemento(d_matrizSalida, desplazamiento, columna, elemento * 2, d_dimMatriz);
else
setElemento(d_matrizSalida, fila, desplazamiento, elemento * 2, d_dimMatriz);
}
// Otherwise this cell copies itself to the output.
else
{
setElemento(d_matrizSalida, fila, columna, getElemento(d_matrizEntrada, fila, columna, d_dimMatriz), d_dimMatriz);
}
// Wait for all merge writes.
__syncthreads();
}
else
{
// Zeros and far-edge cells pass through unchanged.
setElemento(d_matrizSalida, fila, columna, getElemento(d_matrizEntrada, fila, columna, d_dimMatriz), d_dimMatriz);
}
// Final barrier before the kernel ends.
__syncthreads();
}
__global__ void kernelSetMatrizCeros(int *matriz, dimensionesMatriz* d_dimMatriz)
/*
Zero-fill kernel: each thread clears exactly one cell of the matrix.
*/
{
	// Global 2D coordinates of this thread's cell.
	int fila = blockIdx.y * blockDim.y + threadIdx.y;
	int columna = blockIdx.x * blockDim.x + threadIdx.x;
	// Clear the cell.
	setElemento(matriz, fila, columna, 0, d_dimMatriz);
	// Barrier retained from the original implementation; no shared state
	// is exchanged after it.
	__syncthreads();
}
__global__ void kernelDesplazar(int *d_matrizEntrada, int *d_matrizSalida, int* despVertical, int* despHorizontal, dimensionesMatriz* d_dimMatriz)
/*
Shift pass of a move: every non-zero cell advances one step in the given
direction if the neighbouring cell in that direction is empty; otherwise
it keeps its position. d_matrizSalida must be zero-filled beforehand,
because zero cells write nothing. One thread per cell.
NOTE(review): the __syncthreads() calls below are inside a divergent
branch (only non-zero, non-edge cells execute them) -- undefined
behaviour per the CUDA programming guide; confirm/restructure.
*/
{
// Global 2D position and value of this thread's cell.
int fila = blockIdx.y * blockDim.y + threadIdx.y;
int columna = blockIdx.x * blockDim.x + threadIdx.x;
int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz);
int ultimaPosicion, posicionActual;
// Work out this cell's coordinate along the move axis and the edge index.
if (*despVertical != 0)
{
posicionActual = fila;
if (*despVertical == -1)
ultimaPosicion = 0;
else
ultimaPosicion = d_dimMatriz->numFilas - 1;
}
else
{
posicionActual = columna;
if (*despHorizontal == -1)
ultimaPosicion = 0;
else
ultimaPosicion = d_dimMatriz->numColumnas - 1;
}
// Whether this cell moves one step this pass.
bool desplazarem = false;
// Only non-zero cells away from the far edge can move.
if ((posicionActual != ultimaPosicion) && (elemento != 0))
{
// Move only when the neighbouring cell in the move direction is empty.
int casillaVecina = getElemento(d_matrizEntrada, fila + *despVertical, columna + *despHorizontal, d_dimMatriz);
if (casillaVecina == 0)
{
desplazarem = true;
}
// Wait until every thread has inspected its neighbour.
__syncthreads();
// Write the value at the shifted position...
if (desplazarem)
{
//printf("Soy [%d][%d] (%d) y me desplazo. \n", fila, columna, elemento);
setElemento(d_matrizSalida, fila + *despVertical, columna + *despHorizontal, elemento, d_dimMatriz);
}
// ...or keep it in place.
else
{
//printf("Soy [%d][%d] (%d) y NO me desplazo. \n", fila, columna, elemento);
setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz);
}
// Wait for the writes of the other threads.
__syncthreads();
}
// Cells already at the far edge keep their value.
else if (elemento != 0)
{
//printf("Soy [%d][%d] (%d) y NO me desplazo pq estoy al limite o soy un 0. \n", fila, columna, elemento);
setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz);
}
// Zero cells write nothing: d_matrizSalida already contains zeros.
// Final barrier before the kernel ends.
__syncthreads();
}
// -------- KERNELS COMPROBADORES ---------- //
__global__ void kernelComprobarIguales(int *d_matrizUno, int *d_matrizDos, bool* d_sonIguales, dimensionesMatriz* d_dimMatriz)
/*
Element-wise equality test: any thread that sees a mismatch lowers the
output flag. The caller is expected to set *d_sonIguales beforehand,
since this kernel only ever writes false.
*/
{
	// Global 2D coordinates of this thread's cell.
	int fila = blockIdx.y * blockDim.y + threadIdx.y;
	int columna = blockIdx.x * blockDim.x + threadIdx.x;
	// Compare the two matrices at this position.
	bool distintos = getElemento(d_matrizUno, fila, columna, d_dimMatriz)
		!= getElemento(d_matrizDos, fila, columna, d_dimMatriz);
	if (distintos)
		*d_sonIguales = false;
	// Barrier retained from the original implementation.
	__syncthreads();
}
__global__ void kernelComprobarLlena(int *d_matriz, bool* d_estaLlena, dimensionesMatriz* d_dimMatriz)
/*
Fullness test: any thread that finds an empty (zero) cell lowers the
output flag. The caller is expected to set *d_estaLlena beforehand,
since this kernel only ever writes false.
*/
{
	// Global 2D coordinates of this thread's cell.
	int fila = blockIdx.y * blockDim.y + threadIdx.y;
	int columna = blockIdx.x * blockDim.x + threadIdx.x;
	// A zero cell means the board is not full.
	if (getElemento(d_matriz, fila, columna, d_dimMatriz) == 0)
		*d_estaLlena = false;
	// Barrier retained from the original implementation.
	__syncthreads();
}
__global__ void kernelComprobarSiHaGanado(int *d_matriz, bool* d_haGanado, dimensionesMatriz* d_dimMatriz)
/*
Win test: raises the output flag when any cell holds the OBJETIVO tile.
(The original header comment was copy-pasted from kernelComprobarLlena;
this kernel only ever writes true.)
*/
{
	// Global 2D coordinates of this thread's cell.
	int fila = blockIdx.y * blockDim.y + threadIdx.y;
	int columna = blockIdx.x * blockDim.x + threadIdx.x;
	// Reaching the goal value anywhere wins the game.
	if (getElemento(d_matriz, fila, columna, d_dimMatriz) == OBJETIVO)
		*d_haGanado = true;
	// Barrier retained from the original implementation.
	__syncthreads();
}
__global__ void kernelComprobarMovimientosPosibles(int *d_matriz, bool *seguirJugando, dimensionesMatriz* d_dimMatriz)
/*
Merge availability test: each thread checks whether its cell holds the
same value as any of its four neighbours. If at least one thread finds a
possible merge, the output flag is raised; the caller is expected to
initialise *seguirJugando, since this kernel only ever writes true.
*/
{
	// Global 2D coordinates and value of this thread's cell.
	int fila = blockIdx.y * blockDim.y + threadIdx.y;
	int columna = blockIdx.x * blockDim.x + threadIdx.x;
	int valor = getElemento(d_matriz, fila, columna, d_dimMatriz);
	// Becomes true when any in-bounds neighbour matches this cell's value.
	bool hayJugada = false;
	// Above.
	if (fila > 0 && getElemento(d_matriz, fila - 1, columna, d_dimMatriz) == valor)
		hayJugada = true;
	// Below.
	if (fila < d_dimMatriz->numFilas - 1 && getElemento(d_matriz, fila + 1, columna, d_dimMatriz) == valor)
		hayJugada = true;
	// Right.
	if (columna < d_dimMatriz->numColumnas - 1 && getElemento(d_matriz, fila, columna + 1, d_dimMatriz) == valor)
		hayJugada = true;
	// Left.
	if (columna > 0 && getElemento(d_matriz, fila, columna - 1, d_dimMatriz) == valor)
		hayJugada = true;
	// Only raise the flag; never lower it (other threads may have set it).
	if (hayJugada)
		*seguirJugando = true;
}
// -------- FUNCIONES AUX HOST ----------- //
__host__ void leerParametros(int argc, const char* argv[])
/*
Parses the command-line arguments and sets the game's global configuration:
- dimMatriz / bytesMatriz : matrix dimensions (must be square),
- dimGrid / dimBlock      : kernel launch geometry,
- automatico              : 'a' = automatic play, 'm' = manual,
- modoDiablo              : 'd' = hard mode, 'f' = easy.
Expected usage: program a/m f/d num_rows num_cols. Exits on invalid input.
*/
{
// Validate argument count and the mode/difficulty flags.
if ((argc != 5) || ((argv[1][0] != 'a') && (argv[1][0] != 'm')) || ((argv[2][0] != 'f') && (argv[2][0] != 'd')))
{
cout << "Error en la introduccion de parametros, los parametros son:\nautomatico/manual (a/m), facil/dificil (f/d), num_filas, num_columnas\n\nUso = nombreprograma a/m f/d num_filas num_columnas\n" << endl;
exit(1);
}
else
{
dimMatriz.numFilas = atoi(argv[3]);
dimMatriz.numColumnas = atoi(argv[4]);
// The rest of the code (seeding, painting, shifting) assumes a square matrix.
if (dimMatriz.numFilas != dimMatriz.numColumnas)
{
cout << "El numero de filas y de columnas no puede ser distinto, crack." << endl;
exit(2);
}
bytesMatriz = atoi(argv[3]) * atoi(argv[4]) * sizeof(int);
// Choose grid/block sizes so the launch covers exactly numFilas x numColumnas
// threads: a 2x2 grid of half-size blocks for even sizes, one block otherwise.
if (dimMatriz.numFilas % 2 == 0)
{
dim3 bloques(2, 2);
dim3 hilos(dimMatriz.numFilas / 2, dimMatriz.numColumnas / 2);
dimGrid = bloques;
dimBlock = hilos;
}
else
{
dim3 bloques(1, 1);
dim3 hilos(dimMatriz.numFilas, dimMatriz.numColumnas);
dimGrid = bloques;
dimBlock = hilos;
}
// Play mode: automatic ('a') or manual.
if (argv[1][0] == 'a')
automatico = true;
else
automatico = false;
// Difficulty: easy ('f') or devil mode.
if (argv[2][0] == 'f')
modoDiablo = false;
else
modoDiablo = true;
}
}
__host__ void pintarMatriz(int *h_matriz)
/*
Draws the matrix on the console, framed by a border (yellow attribute 14),
then restores the default attribute (15). Windows-only (Win32 console API).

BUG FIX: the original bounded the outer ROW loop with numColumnas and the
inner COLUMN loop with numFilas (swapped) — it only worked because the game
forces square matrices. Bounds are now correct for any shape.
*/
{
    HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);

    // Top border: one segment per column. Attribute set once (was per-iteration).
    SetConsoleTextAttribute(hConsole, 14);
    for (int c = 0; c < dimMatriz.numColumnas; c++)
        cout << ("+-------");
    cout << "+" << endl;

    // Cells, row-major.
    for (int i = 0; i < dimMatriz.numFilas; i++)
    {
        for (int j = 0; j < dimMatriz.numColumnas; j++)
        {
            int valor = *(h_matriz + i * dimMatriz.numColumnas + j);
            // Values below 100000 get a trailing tab so columns stay aligned.
            printf("[%d%s]", valor, valor % 100000 == valor ? "\t" : "");
        }
        printf("\n");
    }

    // Bottom border.
    for (int c = 0; c < dimMatriz.numColumnas; c++)
        cout << ("+-------");
    cout << "+" << endl;

    SetConsoleTextAttribute(hConsole, 15);
}
__host__ void caracteristicasTarjeta()
/*
Prints the main characteristics of every GPU installed in the machine:
name, memory clock, memory bus width and the derived memory bandwidth.
*/
{
    // How many devices are present.
    int numDispositivos;
    hipGetDeviceCount(&numDispositivos);

    // Report each one.
    for (int dev = 0; dev < numDispositivos; ++dev) {
        hipDeviceProp_t prop;
        hipGetDeviceProperties(&prop, dev);
        printf("Numero de dispositivo: %d\n", dev);
        printf(" Nombre del dispositivo: %s\n", prop.name);
        printf(" Frecuencia del reloj de memoria (KHz): %d\n", prop.memoryClockRate);
        printf(" Interfaz de memoria (bits): %d\n", prop.memoryBusWidth);
        // Bandwidth = 2 (DDR) * clock * bus-bytes, scaled to GB/s.
        printf(" Ancho de banda de memoria (GB/s): %f\n",
               2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
    }
}
// ------- OP. CON MATRIZ EN HOST ------- //
__host__ void inicializarMatriz(int *h_matriz)
/*
Seeds the (already zeroed) matrix with random starting tiles. Difficulty
selects both the candidate values and how many seeds are placed:
- devil mode: up to 8 seeds from {2, 4}
- easy mode : up to 15 seeds from {2, 4, 8}
The two previously duplicated difficulty branches are merged; the rand()
call order (2x for position, 1x for value on an empty cell) is unchanged.
*/
{
    srand(time(NULL));

    static const int candidatosDiablo[] = { 2, 4 };
    static const int candidatosNormal[] = { 2, 4, 8 };
    const int *candidatos   = modoDiablo ? candidatosDiablo : candidatosNormal;
    const int numCandidatos = modoDiablo ? 2 : 3;
    const int topeSemillas  = modoDiablo ? 8 : 15;

    int contadorSemillas = 0;
    // Stop when enough seeds are placed or the board would be full.
    while (contadorSemillas < topeSemillas
        && contadorSemillas < dimMatriz.numFilas * dimMatriz.numColumnas)
    {
        // NOTE(review): row/column roles look swapped here (columns scale the
        // row stride); it works because the matrix is forced square — confirm.
        int *posicionAleatoria = h_matriz
            + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas
            + (rand() % dimMatriz.numFilas);
        // Only claim cells that are still empty.
        if (*posicionAleatoria == 0)
        {
            *posicionAleatoria = candidatos[rand() % numCandidatos];
            contadorSemillas++;
        }
    }
}
__host__ void rellenarMatrizconcero(int *h_matriz)
/*
Fills the whole host matrix with zeros.

BUG FIX: the original iterated rows up to numColumnas and columns up to
numFilas (swapped bounds), which only covered the matrix because it is
forced square. A single flat loop over every cell is correct for any shape.
*/
{
    int totalCeldas = dimMatriz.numFilas * dimMatriz.numColumnas;
    for (int i = 0; i < totalCeldas; ++i)
        h_matriz[i] = 0;
}
__host__ void copiarMatriz(int *h_matrizCopia, int *h_matrizPega)
/*
Copies h_matrizCopia into h_matrizPega and then zeroes h_matrizCopia,
doing both operations on the GPU (upload both matrices, run the copy and
clear kernels, download both results).

NOTE(review): this is a host-to-host copy + clear routed through the device;
a plain host memcpy/fill would be semantically equivalent and far cheaper —
confirm the GPU round trip is intentional before simplifying.
*/
{
// Device-side pointers.
int *d_matrizCopia;
int *d_matrizPega;
dimensionesMatriz* d_dimMatriz;
// Allocate device memory.
hipMalloc((void **)&d_matrizCopia, bytesMatriz);
hipMalloc((void **)&d_matrizPega, bytesMatriz);
hipMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz));
// Upload both matrices and the dimensions struct.
hipMemcpy(d_matrizCopia, h_matrizCopia, bytesMatriz, hipMemcpyHostToDevice);
hipMemcpy(d_matrizPega, h_matrizPega, bytesMatriz, hipMemcpyHostToDevice);
hipMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice);
// First, copy source into destination.
kernelCopiarMatriz << < dimGrid, dimBlock >> > (d_matrizCopia, d_matrizPega, d_dimMatriz);
hipDeviceSynchronize();
// Then zero the source matrix.
kernelSetMatrizCeros << < dimGrid, dimBlock >> > (d_matrizCopia, d_dimMatriz);
hipDeviceSynchronize();
// Download both results back to the host.
hipMemcpy(h_matrizPega, d_matrizPega, bytesMatriz, hipMemcpyDeviceToHost);
hipMemcpy(h_matrizCopia, d_matrizCopia, bytesMatriz, hipMemcpyDeviceToHost);
// Release device memory.
hipFree(d_matrizPega);
hipFree(d_matrizCopia);
hipFree(d_dimMatriz);
}
__host__ bool desplazarMatriz(int *h_matrizEntrada, int *h_matrizSalida, int* d_matrizEntrada, int* d_matrizSalida, int* h_puntuacion, int despVertical, int despHorizontal)
/*
Performs one move of the game: uploads the matrices, runs the (work-in-
progress) merge kernel and reads the score back.

NOTE(review): most of the original shift/merge loop is commented out below;
in its current state the function only runs kernelSumaYDesplazaDerecha once,
pauses, and always returns true.

BUG FIX: the hipFree calls lived only inside the commented-out region, so
every call leaked five device allocations (d_despVertical, d_despHorizontal,
d_puntuacion, d_dimMatriz, d_iguales). They are now freed before returning.
*/
{
    int* d_despVertical = 0;
    int* d_despHorizontal = 0;
    int* d_puntuacion = 0;
    dimensionesMatriz* d_dimMatriz;
    // Allocate device memory for the scalar parameters.
    hipMalloc((void **)&d_despVertical, sizeof(int));
    hipMalloc((void **)&d_despHorizontal, sizeof(int));
    hipMalloc((void **)&d_puntuacion, sizeof(int));
    hipMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz));
    // Upload matrices and parameters.
    hipMemcpy(d_matrizEntrada, h_matrizEntrada, bytesMatriz, hipMemcpyHostToDevice);
    hipMemcpy(d_matrizSalida, h_matrizSalida, bytesMatriz, hipMemcpyHostToDevice);
    hipMemcpy(d_puntuacion, h_puntuacion, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice);
    hipMemcpy(d_despVertical, &despVertical, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_despHorizontal, &despHorizontal, sizeof(int), hipMemcpyHostToDevice);
    // Run the merge kernel (dynamic shared memory holds the whole matrix).
    //kernelSuma << < dimGrid, dimBlock >> > (d_matrizEntrada, d_matrizSalida, d_puntuacion, d_despVertical, d_despHorizontal, d_dimMatriz);
    kernelSumaYDesplazaDerecha << < dimGrid, dimBlock, dimMatriz.numFilas * dimMatriz.numColumnas * sizeof(int) >> > (d_matrizEntrada, d_matrizSalida, d_puntuacion, d_dimMatriz);
    // Wait for the kernel to finish.
    hipDeviceSynchronize();
    hipMemcpy(h_puntuacion, d_puntuacion, sizeof(int), hipMemcpyDeviceToHost);
    // Flag used by the (disabled) shift loop below.
    bool h_iguales = true;
    bool *d_iguales;
    hipMalloc((void **)&d_iguales, sizeof(bool));
    /*
    // While the input matrix differs from the output, keep shifting;
    // stop when they are equal.
    do
    {
    // First, copy output to input.
    kernelCopiarMatriz << < dimGrid, dimBlock >> > (d_matrizSalida, d_matrizEntrada, d_dimMatriz);
    hipDeviceSynchronize();
    // Second, zero the output.
    kernelSetMatrizCeros << < dimGrid, dimBlock >> > (d_matrizSalida, d_dimMatriz);
    hipDeviceSynchronize();
    // Shift.
    kernelDesplazar << < dimGrid, dimBlock >> > (d_matrizEntrada, d_matrizSalida, d_despVertical, d_despHorizontal, d_dimMatriz);
    hipDeviceSynchronize();
    // Check whether we must keep shifting; assume equal until proven otherwise.
    h_iguales = true;
    hipMemcpy(d_iguales, &h_iguales, sizeof(bool), hipMemcpyHostToDevice);
    kernelComprobarIguales << < dimGrid, dimBlock >> > (d_matrizSalida, d_matrizEntrada, d_iguales, d_dimMatriz);
    hipDeviceSynchronize();
    hipMemcpy(&h_iguales, d_iguales, sizeof(bool), hipMemcpyDeviceToHost);
    } while (!h_iguales);
    hipFree(d_iguales);
    // Check whether the matrix is full and whether any move remains.
    bool h_movimientosPosibles = true;
    hipMemcpy(h_matrizSalida, d_matrizSalida, bytesMatriz, hipMemcpyDeviceToHost);
    // If full, check for possible moves.
    if (estaLlena(d_matrizSalida))
    h_movimientosPosibles = movimientosPosibles(d_matrizSalida);
    // Otherwise add a new seed and re-check for game over.
    else {
    nuevaSemilla(h_matrizSalida, 1);
    hipMemcpy(d_matrizSalida, h_matrizSalida, bytesMatriz, hipMemcpyHostToDevice);
    if (estaLlena(d_matrizSalida))
    h_movimientosPosibles = movimientosPosibles(d_matrizSalida);
    }
    hipFree(d_despVertical);
    hipFree(d_despHorizontal);
    hipFree(d_dimMatriz);
    return h_movimientosPosibles;
    */
    // Free every device allocation made above (previously leaked).
    hipFree(d_iguales);
    hipFree(d_despVertical);
    hipFree(d_despHorizontal);
    hipFree(d_puntuacion);
    hipFree(d_dimMatriz);
    system("pause");
    return true;
}
__global__ void kernelSumaYDesplazaDerecha(int *d_matrizEntrada, int *d_matrizSalida, int *d_puntuacion, dimensionesMatriz* d_dimMatriz)
/*
Work-in-progress "merge right" kernel: stages the matrix in dynamic shared
memory, detects pairable runs of equal tiles along a row, adds their merge
score to *d_puntuacion, and (for now) writes each element back unchanged.
Expects one thread per cell and numFilas*numColumnas*sizeof(int) of dynamic
shared memory.

BUG FIXES vs. the original:
- `posicionActual` was read uninitialized in the branch guard; it is now
  initialized to `columna` (the thread's own position, matching the guard's
  evident intent of skipping the last column).
- Both `__syncthreads()` calls sat inside a divergent `if` branch, which is
  undefined behaviour (possible deadlock). They are hoisted so every thread
  in the block reaches them.
- No barrier separated the shared-memory writes from the first reads; one is
  added so all elements are published before any thread scans the row.
*/
{
    // Shared staging buffer for the matrix (dynamic, sized at launch).
    extern __shared__ int s_fila_ini[];

    // This thread's cell.
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz);

    // Publish the element (2-D layout).
    s_fila_ini[fila * d_dimMatriz->numColumnas + columna] = elemento;
    printf("[%d][%d] = %d\n", fila, columna, s_fila_ini[fila * d_dimMatriz->numColumnas + columna]);

    int ultimaPosicion = d_dimMatriz->numColumnas - 1;
    // Was read uninitialized in the original; the guard below clearly means
    // "I am not in the last column".
    int posicionActual = columna;
    // TODO(review): this 1-D write races with the 2-D write above for every
    // row > 0; the scan loops below also index 1-D. The staging layout needs
    // to be made consistent before the kernel is finished.
    s_fila_ini[columna] = elemento;

    // All writes to shared memory must land before anyone reads (uniform barrier).
    __syncthreads();

    // Whether this thread's tile should be doubled.
    bool multiplicarem = false;
    if (elemento != 0 && posicionActual != ultimaPosicion)
    {
        // Count equal tiles to the right until a different non-zero tile appears.
        int paridad = 1;
        int casilla;
        int desplazamiento = columna + 1;
        do {
            casilla = s_fila_ini[desplazamiento + 1];
            // A different non-zero tile ends the run.
            if (casilla != elemento && casilla != 0) { break; }
            // Another equal tile extends the run.
            if (casilla == elemento) { paridad += 1; }
            desplazamiento += 1;
        } while (desplazamiento != ultimaPosicion);
        // Even run length means this tile pairs up.
        if (paridad % 2 == 0)
            multiplicarem = true;
    }

    // Barrier hoisted out of the divergent branch: every thread reaches it.
    __syncthreads();

    // multiplicarem is only ever true inside the guard above, so no extra
    // condition is needed here.
    if (multiplicarem)
    {
        // Walk right to the partner tile.
        int casilla;
        int desplazamiento = columna;
        do {
            desplazamiento += 1;
            casilla = s_fila_ini[desplazamiento + 1];
        } while (casilla != elemento);
        // Accumulate this thread's partial score atomically.
        atomicAdd(d_puntuacion, elemento * 2);
        // Doubling of the partner tile is still disabled:
        //s_fila_sum[desplazamiento] = elemento * 2;
    }

    // Wait for all merges before writing the result.
    __syncthreads();
    setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz);

    /*
    // FUTURE WORK: the displacement pass.
    desplazamiento = columna;
    int cuantosCeros = 0;
    int casilla;
    do {
    casilla = s_fila_ini[desplazamiento + 1];
    if (casilla == 0) { cuantosCeros += 1; }
    desplazamiento += 1;
    } while (desplazamiento != ultimaPosicion);
    // Once the zero count is known, shift by that many cells.
    //s_fila_fin[columna + cuantosCeros] = s_fila_sum[columna]
    setElemento(d_matrizSalida, fila, columna + cuantosCeros, elemento, d_dimMatriz);
    */
}
__host__ void setElementoHost(int *h_matriz, int fila, int columna, int elemento)
/*
Writes `elemento` at [fila][columna] of the row-major host matrix.
*/
{
    int indice = fila * dimMatriz.numColumnas + columna;
    h_matriz[indice] = elemento;
}
__host__ void nuevaSemilla(int *h_matriz, int numSemillas)
/*
Places `numSemillas` new random tiles on empty cells of the host matrix.
Candidate values depend on difficulty: {2,4} in devil mode, {2,4,8} otherwise.

BUG FIX: the original's `semillaGenerada` flag stopped the loop after the
FIRST seed regardless of numSemillas; the loop now runs until the requested
count is placed (identical behaviour for the existing numSemillas == 1 call).
The two duplicated difficulty branches are also merged.

NOTE(review): loops forever if no empty cell exists — callers check
estaLlena() first; confirm that invariant holds for every call site.
*/
{
    static const int candidatosDiablo[] = { 2, 4 };
    static const int candidatosNormal[] = { 2, 4, 8 };
    const int *candidatos   = modoDiablo ? candidatosDiablo : candidatosNormal;
    const int numCandidatos = modoDiablo ? 2 : 3;

    while (numSemillas > 0)
    {
        // Pick a random cell (square-matrix indexing as elsewhere in the file).
        int *posicionAleatoria = h_matriz
            + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas
            + (rand() % dimMatriz.numFilas);
        // Only seed cells that are still empty.
        if (*posicionAleatoria == 0)
        {
            *posicionAleatoria = candidatos[rand() % numCandidatos];
            numSemillas--;
        }
    }
}
// ------- COMPROBACIONES EN HOST ------- //
__host__ bool estaLlena(int* d_matriz)
/*
Returns true when the device matrix has no empty (zero) cells.
Launches kernelComprobarLlena, which clears the flag if any thread sees a 0.
*/
{
    bool h_llena = true;
    bool *d_llena = NULL;
    dimensionesMatriz *d_dims = NULL;

    // Allocate and upload the flag (assumed full) and the dimensions.
    hipMalloc((void **)&d_llena, sizeof(bool));
    hipMalloc((void **)&d_dims, sizeof(dimensionesMatriz));
    hipMemcpy(d_llena, &h_llena, sizeof(bool), hipMemcpyHostToDevice);
    hipMemcpy(d_dims, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice);

    // One thread per cell checks for zeros.
    kernelComprobarLlena << < dimGrid, dimBlock >> > (d_matriz, d_llena, d_dims);
    hipDeviceSynchronize();

    // Read the verdict back and clean up.
    hipMemcpy(&h_llena, d_llena, sizeof(bool), hipMemcpyDeviceToHost);
    hipFree(d_llena);
    hipFree(d_dims);
    return h_llena;
}
__host__ bool finJuego(int* d_matriz)
/*
Returns true when the device matrix contains the winning tile (OBJETIVO).
Launches kernelComprobarSiHaGanado, which sets the flag on a match.
*/
{
    bool h_ganado = false;
    bool *d_ganado = NULL;
    dimensionesMatriz *d_dims = NULL;

    // Allocate and upload the flag (assumed not won) and the dimensions.
    hipMalloc((void **)&d_ganado, sizeof(bool));
    hipMalloc((void **)&d_dims, sizeof(dimensionesMatriz));
    hipMemcpy(d_ganado, &h_ganado, sizeof(bool), hipMemcpyHostToDevice);
    hipMemcpy(d_dims, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice);

    // One thread per cell compares against the target value.
    kernelComprobarSiHaGanado << < dimGrid, dimBlock >> > (d_matriz, d_ganado, d_dims);
    hipDeviceSynchronize();

    // Read the verdict back and clean up.
    hipMemcpy(&h_ganado, d_ganado, sizeof(bool), hipMemcpyDeviceToHost);
    hipFree(d_ganado);
    hipFree(d_dims);
    return h_ganado;
}
__host__ bool movimientosPosibles(int* d_matriz)
/*
Returns true when at least one merge move remains on the device matrix.
Launches kernelComprobarMovimientosPosibles, which sets the flag if any
cell can combine with a neighbour.
*/
{
    bool h_hayMovimientos = false;
    bool *d_hayMovimientos = NULL;
    dimensionesMatriz *d_dims = NULL;

    // Allocate and upload the flag (assumed no moves) and the dimensions.
    hipMalloc((void **)&d_hayMovimientos, sizeof(bool));
    hipMalloc((void **)&d_dims, sizeof(dimensionesMatriz));
    hipMemcpy(d_hayMovimientos, &h_hayMovimientos, sizeof(bool), hipMemcpyHostToDevice);
    hipMemcpy(d_dims, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice);

    // One thread per cell looks for a mergeable neighbour.
    kernelComprobarMovimientosPosibles << < dimGrid, dimBlock >> > (d_matriz, d_hayMovimientos, d_dims);
    hipDeviceSynchronize();

    // Read the verdict back and clean up.
    hipMemcpy(&h_hayMovimientos, d_hayMovimientos, sizeof(bool), hipMemcpyDeviceToHost);
    hipFree(d_dims);
    hipFree(d_hayMovimientos);
    return h_hayMovimientos;
}
// ----- GUARDADO Y LECTURA ----- //
__host__ void escribirMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos)
/*
Saves the current game to "<player>.txt": rows, columns, lives, move count,
score, then the matrix row by row.

BUG FIXES: the original called fclose(archivo) even when fopen had returned
NULL (crash on open failure), and leaked the `new char[]` filename buffer.
fclose now happens only on success and the temporary buffer is gone
(fopen takes the string's c_str() directly).
*/
{
    // Build the file name from the player name.
    nombreJugador += ".txt";
    FILE *archivo = fopen(nombreJugador.c_str(), "w");
    if (archivo == NULL)
    {
        cout << "Error escribiendo partida. " << endl;
    }
    else
    {
        // Header: dimensions, lives, moves, score.
        fprintf(archivo, "%d\n", dimMatriz.numFilas);
        fprintf(archivo, "%d\n", dimMatriz.numColumnas);
        fprintf(archivo, "%d\n", vidas);
        fprintf(archivo, "%d\n", *movimientos);
        fprintf(archivo, "%d\n", *puntuacion);
        // Matrix body, row-major, space-separated.
        for (int i = 0; i < dimMatriz.numColumnas; ++i) {
            for (int j = 0; j < dimMatriz.numFilas; ++j) {
                fprintf(archivo, "%d ", *(h_matriz + i * dimMatriz.numColumnas + j));
            }
            fprintf(archivo, "\n");
        }
        // Close only a successfully opened file.
        fclose(archivo);
    }
}
__host__ bool leerMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos)
/*
Loads a saved game from "<player>.txt" (format written by escribirMatriz:
rows, columns, lives, moves, score, then the matrix). Returns true on a
successful, dimension-compatible load; false otherwise (a new game starts).

BUG FIX: corrected the user-facing message typo "Erro abriendo".
*/
{
    // Open the save file.
    ifstream in(nombreJugador + ".txt");
    bool lecturaCorrecta = true;
    // Missing file: fall back to a new game.
    if (!in)
    {
        cout << "Error abriendo el archivo. La partida no existe, se iniciara una partida nueva." << endl;
        lecturaCorrecta = false;
    }
    else
    {
        // Saved dimensions must match the ones given on the command line.
        int a_filas, a_columnas;
        in >> a_filas;
        in >> a_columnas;
        in >> vidas;
        if (a_filas != dimMatriz.numFilas || a_columnas != dimMatriz.numColumnas)
        {
            cout << "La partida cargada no es congruente con el numero de filas/columnas pasada como parametro." << endl;
            cout << "Se iniciara una partida nueva." << endl;
            lecturaCorrecta = false;
        }
        else
        {
            // Load move count and score, then the matrix cell by cell.
            in >> *movimientos;
            in >> *puntuacion;
            for (int fila = 0; fila < dimMatriz.numFilas; fila++)
            {
                for (int columna = 0; columna < dimMatriz.numColumnas; columna++)
                {
                    int num;
                    in >> num;
                    setElementoHost(h_matriz, fila, columna, num);
                }
            }
        }
    }
    // Close the stream (no-op if it never opened).
    in.close();
    return lecturaCorrecta;
}
| 129f55e251e7b3dcd1cdd2da698f80638b33e08e.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <conio.h>
#include <time.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
#include <chrono>
#include <thread>
#include <Windows.h>
using namespace std;
#define OBJETIVO 2048
#define DESP_POS 1
#define DESP_NEG -1
#define NO_DESP 0
#ifdef __INTELLISENSE__
void __syncthreads(); // Para evitar el error que da el intellisense con syncthreads y atomicadd
void atomicAdd(int *a, int b);
#endif
// Variables globales para recoger por parámetros
struct dimensionesMatriz {
int numFilas;
int numColumnas;
} dimMatriz;
dim3 dimGrid; // Grid de bloques
dim3 dimBlock; // Hilos por bloque
// Variables de control
// Juego automático o manual
bool automatico;
// Nº de bytes que ocupa la matriz
int bytesMatriz;
// Dificultad del juego
bool modoDiablo;
// Control de vidas:
int vidas;
// Funciones de juego
__host__ void juegoAutomatico(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida);
__host__ void juegoManual(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida);
// Funciones auxiliares en Device
__device__ int getElemento(int *matriz, int fila, int columna, dimensionesMatriz* d_dimMatriz);
__device__ void setElemento(int *matriz, int fila, int columna, int elemento, dimensionesMatriz* d_dimMatriz);
// Kernels
// Kernel movimiento
__global__ void kernelDesplazar(int *h_matrizEntrada, int *h_matrizSalida, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz);
// Kernels auxiliares
__global__ void kernelSuma(int *h_matrizEntrada, int *h_matrizSalida, int *d_puntuacion, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz);
__global__ void kernelCopiarMatriz(int *d_matrizCopia, int *d_matrizPega, dimensionesMatriz* d_dimMatriz);
__global__ void kernelSetMatrizCeros(int *matriz, dimensionesMatriz* d_dimMatriz);
__global__ void kernelSumaYDesplazaDerecha(int *d_matrizEntrada, int *d_matrizSalida, int *d_puntuacion, dimensionesMatriz* d_dimMatriz);
// Funciones auxiliares de comprobación de estado de la matriz
__global__ void kernelComprobarIguales(int *d_matrizUno, int *d_matrizDos, bool* d_sonIguales, dimensionesMatriz* d_dimMatriz);
__global__ void kernelComprobarLlena(int *d_matrizUno, bool* d_estaLlena, dimensionesMatriz* d_dimMatriz);
__global__ void kernelComprobarMovimientosPosibles(int *d_matriz, bool *seguirJugando, dimensionesMatriz* d_dimMatriz);
__global__ void kernelComprobarSiHaGanado(int *d_matriz, bool* d_haGanado, dimensionesMatriz* d_dimMatriz);
// Funciones auxiliares en Host
__host__ void caracteristicasTarjeta();
__host__ void leerParametros(int argc, const char* argv[]);
// Operaciones con matrices
__host__ void inicializarMatriz(int *h_matriz);
__host__ void rellenarMatrizconcero(int *h_matriz);
__host__ void pintarMatriz(int *h_matriz);
__host__ void copiarMatriz(int *h_matrizCopia, int *h_matrizPega);
__host__ void setElementoHost(int *h_matriz, int fila, int columna, int elemento);
__host__ void nuevaSemilla(int *h_matriz, int numSemillas);
// Comprobadores
__host__ bool estaLlena(int* d_matriz);
__host__ bool finJuego(int* d_matriz);
__host__ bool movimientosPosibles(int* d_matriz);
// Funciones de host de carga y guardado de matrices:
__host__ void escribirMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos);
__host__ bool leerMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos);
// Funcion de movimiento en Host
__host__ bool desplazarMatriz(int *h_matrizEntrada, int *h_matrizSalida, int *d_matrizEntrada, int *d_matrizSalida, int *h_puntuacion, int despVertical, int despHorizontal);
// MAIN
int main(int argc, const char* argv[])
{
leerParametros(argc, argv);
// Declaracion de matrices en host:
int* h_matriz = (int *)malloc(bytesMatriz);
int* h_matrizResultado = (int *)malloc(bytesMatriz);
// Punteros a matrices en DEVICE:
int *d_matrizEntrada;
int *d_matrizSalida;
// Reserva de memoria en DEVICE
cudaMalloc((void **)&d_matrizEntrada, bytesMatriz);
cudaMalloc((void **)&d_matrizSalida, bytesMatriz);
// Relleno las matrices con 0s:
rellenarMatrizconcero(h_matriz);
rellenarMatrizconcero(h_matrizResultado);
if (automatico)
juegoAutomatico(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida);
else
juegoManual(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida);
// Libero la memoria de device
cudaFree(d_matrizEntrada);
cudaFree(d_matrizSalida);
return 0;
}
// ----------- MODOS DE JUEGO ----------- //
__host__ void juegoAutomatico(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida)
{
cout << "+--------------------------------------------------------+" << endl;
cout << "| Bienvenido al 16384, se ha elegido el modo automatico. |" << endl;
cout << "+--------------------------------------------------------+" << endl;
inicializarMatriz(h_matriz);
// Se comprueban las caracteristicas de la tarjeta
cout << "+--------------------------------------------------------+" << endl;
caracteristicasTarjeta();
cout << "+--------------------------------------------------------+" << endl;
cout << endl;
system("pause");
system("cls");
// Contador de movimientos
int movimientos = 0;
int puntuacion = 0;
vidas = 5;
// Variable control de entrada
bool seguirJugando = false;
bool ganado = false;
while (!ganado && vidas > 0)
{
// Eligo un movimiento aleatorio
int movimiento = rand() % 4;
system("CLS");
// Y lo hago
switch (movimiento)
{
// PARAMETROS DESPLAZAR_MATRIZ -> matriz inicial, matriz resultado, desplazamiento eje y, desplazamiento eje x
case 0:
cout << "Muevo arriba " << endl;
seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_NEG, NO_DESP); // Desplazar arriba
ganado = finJuego(d_matrizSalida);
break;
case 1:
cout << "Muevo abajo " << endl;
seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_POS, NO_DESP); // Desplazar abajo
ganado = finJuego(d_matrizSalida);
break;
case 2:
cout << "Muevo izquierda " << endl;
seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_NEG); // Desplazar izquierda
ganado = finJuego(d_matrizSalida);
break;
case 3:
cout << "Muevo derecha " << endl;
seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_POS); // Desplazar derecha
ganado = finJuego(d_matrizSalida);
break;
}
movimientos++;
copiarMatriz(h_matrizResultado, h_matriz);
cout << "+------------------------------------------------------------+" << endl;
printf("|Movimiento: %d\tPuntuacion: %d\t Vidas: %d \n", movimientos, puntuacion, vidas);
cout << "+------------------------------------------------------------+" << endl;
pintarMatriz(h_matriz);
if (!seguirJugando && vidas > 1)
{
cout << "+---------------------------------------------------------------------------------------------+" << endl;
cout << "| No hay mas movimientos posibles, la maquina ha perdido. Hemos suspendido el test de Turing. |" << endl;
cout << "+---------------------------------------------------------------------------------------------+" << endl;
vidas -= 1;
cout << endl;
cout << "+---------------------------------------------------------------------------------------------+" << endl;
cout << "| Lo intentamos de nuevo (si/no)?. |" << endl;
cout << "+---------------------------------------------------------------------------------------------+" << endl;
string otraVez;
cin >> otraVez;
if (otraVez == "no")
{
cout << "Hasta la vista, Baby. " << endl;
exit(0);
}
else if(otraVez == "si")
{
rellenarMatrizconcero(h_matriz);
rellenarMatrizconcero(h_matrizResultado);
movimientos = 0;
seguirJugando = true;
}
}
else if (ganado)
{
cout << endl << "LA MAQUINA HA GANADO VIVA TURING " << endl;
exit(0);
}
// Sleep chungo de C++. Cambiar el 100 por lo que se quiera
//this_thread::sleep_for(chrono::milliseconds(100));
// Si se quiere avanzar con enters descomentar esto:
//system("PAUSE");
}
cout << "A la maquina no le quedan vidas. Fin de juego. Adios Terminator. " << endl;
exit(0);
}
__host__ void juegoManual(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida)
{
// Muestro mensaje de bienvenida
cout << "+----------------------------------------------------+" << endl;
cout << "|Hola amigo bienvenido al 16384 que ganitas de jugar |" << endl;
cout << "+----------------------------------------------------+" << endl;
cout << endl;
// Muestro características de la tarjeta
cout << "+----------------------------------------------------+" << endl;
caracteristicasTarjeta();
cout << "+----------------------------------------------------+" << endl;
// Variables de control y estados iniciales:
int movimientos = 0; // Contador de movimientos por partida
int puntuacion = 0; // Puntuación total
vidas = 5; // Establezco vidas a 5.
char entrada1, entrada2; // Carácteres de lectura por teclado
bool correcto = false; // Variable control de entrada
bool puedeSeguirJugando = false; // Aún hay movimientos disponibles
bool ganado = false; // Si ha ganado
bool haGanadoYQuiereSeguir = false; // Comprobacion por si quiere seguir jugando despues de ganar
// Recojo nombre de usuario
string nombre;
cout << "+----------------------------------------------------+" << endl;
cout << "| Dame tu nombre amiguete: |" << endl;
cout << "+----------------------------------------------------+" << endl;
cin >> nombre;
cout << endl;
// Cargo (o no) la partida
cout << "+----------------------------------------------------+" << endl;
cout << "| Quieres cargar tu partida? |" << endl;
cout << "+----------------------------------------------------+" << endl;
string cargar;
cin >> cargar;
// Si quiere cargar y existe la partida, la cargo.
if (cargar == "si" && leerMatriz(h_matriz, nombre, &movimientos, &puntuacion))
{
cout << "+----------------------------------------------------+" << endl;
cout << "| Partida cargada. |" << endl;
cout << "+----------------------------------------------------+" << endl;
}
// Si no, establezco matriz.
else
{
inicializarMatriz(h_matriz);
}
// Juego:
while (true)
{
// Imprimo matriz y estadísticas
system("CLS");
cout << "+------------------------------------------------------------+" << endl;
printf("|Movimiento: %d\tPuntuacion: %d\t Vidas: %d \n", movimientos, puntuacion, vidas);
cout << "+------------------------------------------------------------+" << endl;
pintarMatriz(h_matriz);
// Tengo que volver a comprobar la entrada.
correcto = true;
// Las teclas de movimiento hacen input de dos caracteres,
// siendo el segundo el que nos importa para el movimiento
entrada1 = getch();
// Si el usuario quiere salir, se sale.
if (entrada1 == 's')
break;
else
{
// Obtengo segundo caracter
entrada2 = getch();
// Realizo jugada:
switch (entrada2)
{
// PARAMETROS DESPLAZAR_MATRIZ -> matriz inicial, matriz resultado, puntuacion, desplazamiento eje y, desplazamiento eje x
case 72:
puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_NEG, NO_DESP); // Desplazar arriba
ganado = finJuego(d_matrizSalida);
break;
case 80:
puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_POS, NO_DESP); // Desplazar abajo
ganado = finJuego(d_matrizSalida);
break;
case 75:
puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_NEG); // Desplazar izquierda
ganado = finJuego(d_matrizSalida);
break;
case 77:
puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_POS); // Desplazar derecha
ganado = finJuego(d_matrizSalida);
break;
default:
cout << "Caracter incorrecto. " << endl;
correcto = false;
}
}
// Tras hacer la jugada, compruebo el estado de la matriz.
if (correcto)
{
// Copio resultado a matriz:
copiarMatriz(h_matrizResultado, h_matriz);
// Incremento movimientos
movimientos++;
// Si pierde y le quedan vidas y no estaba farmeando puntos.
if (!puedeSeguirJugando && vidas > 1 && !haGanadoYQuiereSeguir)
{
// Resto una vida
vidas -= 1;
// Muestro mensaje por pantalla:
cout << "+---------------------------------------------------------------------------+" << endl;
cout << "| No hay mas movimientos posibles, fin de juego. Intentalo de nuevo. |" << endl;
cout << "| Te quedan: " << vidas << " vidas. | " << endl;
cout << "+---------------------------------------------------------------------------+" << endl;
// Recojo si quiere seguir jugando:
string otraVez;
do
{
cout << "+---------------------------------------------------------------------------+" << endl;
cout << "| Quieres intentarlo de nuevo (si/no)? |" << endl;
cout << "+---------------------------------------------------------------------------+" << endl;
cin >> otraVez;
}while (!(otraVez == "si") || !(otraVez != "no"));
// Si no quiere seguir jugando, se sale.
if (otraVez == "no")
{
cout << "Nos vemos amigo. " << endl;
exit(0);
}
// Si se quiere seguir jugando, se resetean datos.
else
{
rellenarMatrizconcero(h_matriz);
rellenarMatrizconcero(h_matrizResultado);
movimientos = 0;
ganado = false;
haGanadoYQuiereSeguir = false;
inicializarMatriz(h_matriz);
}
}
// Si pierde y no le quedan vidas y no estaba farmeando puntos.
else if (!puedeSeguirJugando && vidas == 1 && !haGanadoYQuiereSeguir)
{
vidas -= 1;
cout << endl << "No hay mas movimientos posibles, fin del juego." << endl;
cout << endl << "Además no te quedan vidas." << endl;
cout << "Esta es tu puntuacion final: " << puntuacion << endl;
exit(0);
}
// Si había ganado y ahora ya no puede seguir
else if (!puedeSeguirJugando && haGanadoYQuiereSeguir)
{
// Muestro mensaje por pantalla:
cout << "+---------------------------------------------------------------------------+" << endl;
cout << endl << "| No hay mas movimientos posibles, fin de juego. Intentalo de nuevo." << endl;
cout << endl << "| Te quedan: " << vidas << " vidas. " << endl;
cout << "+----------------------------------------------------------------------------+" << endl;
// Recojo si quiere seguir jugando:
string otraVez;
do
{
cout << "+---------------------------------------------------------------------------------------------+" << endl;
cout << "| Quieres intentarlo de nuevo (si/no)? |" << endl;
cout << "+---------------------------------------------------------------------------------------------+" << endl;
cin >> otraVez;
} while (otraVez != "si" || otraVez != "no");
// Si no quiere seguir jugando, se sale.
if (otraVez == "no")
{
cout << "Nos vemos amigo. " << endl;
exit(0);
}
// Si se quiere seguir jugando, se resetean datos.
else
{
rellenarMatrizconcero(h_matriz);
rellenarMatrizconcero(h_matrizResultado);
movimientos = 0;
ganado = false;
haGanadoYQuiereSeguir = false;
inicializarMatriz(h_matriz);
}
}
// Si acaba de ganar
else if (ganado && !haGanadoYQuiereSeguir)
{
cout << "+---------------------------------------------------------------------------+" << endl;
cout << "| Felicidades campeon, has ganado. Esta es tu puntuacion final: " << puntuacion << endl;
cout << "+---------------------------------------------------------------------------+" << endl;
string jugarMas;
while (!(jugarMas == "si") && !(jugarMas == "no"))
{
cout << endl << "Quieres seguir jugando?" << endl;
cin >> jugarMas;
}
if (jugarMas == "no")
{
cout << "Hasta luego!" << endl;
exit(0);
}
else
{
haGanadoYQuiereSeguir = true;
}
}
}
}
// Guardar partida
cout << "Quieres guardar partida? " << endl;
string entrada;
cin >> entrada;
if (entrada == "si")
{
escribirMatriz(h_matriz, nombre, &movimientos, &puntuacion);
cout << "Matriz guardada con nombre: " + nombre << endl;
}
}
// ----------- FUNCIONES DEVICE ----------- //
__device__ int getElemento(int *d_matriz, int fila, int columna, dimensionesMatriz* d_dimMatriz)
/*
Returns the value stored at [fila][columna] of a row-major device matrix.
*/
{
    int indice = fila * d_dimMatriz->numColumnas + columna;
    return d_matriz[indice];
}
__device__ void setElemento(int *d_matriz, int fila, int columna, int elemento, dimensionesMatriz* d_dimMatriz)
/*
Writes `elemento` at [fila][columna] of a row-major device matrix.
*/
{
    int indice = fila * d_dimMatriz->numColumnas + columna;
    d_matriz[indice] = elemento;
}
// --------- KERNELS PRINCIPALES ----------- //
__global__ void kernelCopiarMatriz(int *d_matrizCopia, int *d_matrizPega, dimensionesMatriz* d_dimMatriz)
/*
Copies every element of the source matrix (d_matrizCopia) into the
destination matrix (d_matrizPega).  One thread handles exactly one cell;
the launch configuration is expected to cover the whole matrix.
*/
{
    // Cell handled by this thread.
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    // Read from the source and store the same value into the destination.
    int valor = getElemento(d_matrizCopia, fila, columna, d_dimMatriz);
    setElemento(d_matrizPega, fila, columna, valor, d_dimMatriz);
}
__global__ void kernelSuma(int *d_matrizEntrada, int *d_matrizSalida, int *d_puntuacion, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz)
/*
Given an input and an output matrix, writes the merge ("sum by
displacement") results into the output matrix.  One thread per cell.
A cell merges (is written doubled at the position of its matching
neighbour) when the count of equal values in the move direction is even;
otherwise its value is copied through unchanged.  *d_puntuacion is
incremented atomically by elemento*2 for every merge.

Threads only ever READ d_matrizEntrada and only ever WRITE d_matrizSalida,
and no shared memory is used.

FIX: the original placed __syncthreads() inside the divergent branch
`if (elemento != 0 && posicionActual != ultimaPosicion)`.  A barrier that
only part of the block reaches is undefined behaviour, and since this
kernel uses no shared memory the barriers provided no ordering anyway, so
they were removed.
*/
{
    // Cell owned by this thread.
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    // State describing the direction of the move.
    int ultimaPosicion, desplazamiento, posicionActual;
    bool esVertical;
    if (*despVertical != 0)
    {
        // Vertical move: the row coordinate walks toward the edge.
        posicionActual = fila;
        desplazamiento = fila;
        esVertical = true;
        if (*despVertical == -1)
            ultimaPosicion = 0;
        else
            ultimaPosicion = d_dimMatriz->numFilas - 1;
    }
    else
    {
        // Horizontal move: the column coordinate walks toward the edge.
        posicionActual = columna;
        desplazamiento = columna;
        esVertical = false;
        if (*despHorizontal == -1)
            ultimaPosicion = 0;
        else
            ultimaPosicion = d_dimMatriz->numColumnas - 1;
    }
    // Value at this thread's cell.
    int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz);
    // Whether this cell's value will be doubled into a neighbour.
    bool multiplicarem = false;
    // Non-zero cells that are not already sitting on the edge:
    if (elemento != 0 && posicionActual != ultimaPosicion)
    {
        // Count equal values in the move direction; an even count means
        // this cell is the "source" half of a merge pair.
        int paridad = 1;
        int casilla;
        do {
            // Next cell in the move direction.
            if (esVertical)
                casilla = getElemento(d_matrizEntrada, desplazamiento + *despVertical, columna, d_dimMatriz);
            else
                casilla = getElemento(d_matrizEntrada, fila, desplazamiento + *despHorizontal, d_dimMatriz);
            // A different non-zero value blocks the scan.
            if (casilla != elemento && casilla != 0) { break; }
            // Another equal value extends the run.
            if (casilla == elemento) { paridad += 1; }
            desplazamiento += *despHorizontal + *despVertical;
        } while (desplazamiento != ultimaPosicion);
        if (paridad % 2 == 0)
        {
            multiplicarem = true;
        }
        if (multiplicarem)
        {
            // Walk past the zeros to find the cell receiving elemento*2.
            int casillaDestino;
            desplazamiento = posicionActual; // restart the scan
            do {
                desplazamiento += *despHorizontal + *despVertical;
                if (esVertical)
                    casillaDestino = getElemento(d_matrizEntrada, desplazamiento, columna, d_dimMatriz);
                else
                    casillaDestino = getElemento(d_matrizEntrada, fila, desplazamiento, d_dimMatriz);
            } while (casillaDestino != elemento);
            // Atomic: every merging thread contributes to the score.
            atomicAdd(d_puntuacion, elemento * 2);
            // Write the doubled value at the partner's position.
            if (esVertical)
                setElemento(d_matrizSalida, desplazamiento, columna, elemento * 2, d_dimMatriz);
            else
                setElemento(d_matrizSalida, fila, desplazamiento, elemento * 2, d_dimMatriz);
        }
        else
        {
            // No merge: the cell keeps its value in the output matrix.
            setElemento(d_matrizSalida, fila, columna, getElemento(d_matrizEntrada, fila, columna, d_dimMatriz), d_dimMatriz);
        }
    }
    else
    {
        // Zeros and edge cells are copied through unchanged.
        setElemento(d_matrizSalida, fila, columna, getElemento(d_matrizEntrada, fila, columna, d_dimMatriz), d_dimMatriz);
    }
}
__global__ void kernelSetMatrizCeros(int *matriz, dimensionesMatriz* d_dimMatriz)
/*
Resets every cell of the matrix to 0 (one thread per cell).
*/
{
    // Cell handled by this thread.
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    // Clear it.
    setElemento(matriz, fila, columna, 0, d_dimMatriz);
    // Block-wide barrier (reached by every thread of the block).
    __syncthreads();
}
__global__ void kernelDesplazar(int *d_matrizEntrada, int *d_matrizSalida, int* despVertical, int* despHorizontal, dimensionesMatriz* d_dimMatriz)
/*
Shifts every element of the input matrix one step in the requested
direction, when possible, writing the result into d_matrizSalida (which
the caller has pre-filled with zeros).  One thread per cell; threads only
ever READ d_matrizEntrada and only ever WRITE d_matrizSalida, and no
shared memory is used.

FIX: the original called __syncthreads() inside the divergent branch
below; a barrier that only part of the block reaches is undefined
behaviour.  Since the kernel never reads d_matrizSalida and uses no shared
memory, the barriers provided no ordering and were removed.
*/
{
    // Cell owned by this thread and its current value.
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz);
    int ultimaPosicion, posicionActual;
    // Work out which coordinate moves and where the board edge lies.
    if (*despVertical != 0)
    {
        posicionActual = fila;
        if (*despVertical == -1)
            ultimaPosicion = 0;
        else
            ultimaPosicion = d_dimMatriz->numFilas - 1;
    }
    else
    {
        posicionActual = columna;
        if (*despHorizontal == -1)
            ultimaPosicion = 0;
        else
            ultimaPosicion = d_dimMatriz->numColumnas - 1;
    }
    // Non-zero cell that is not sitting on the edge:
    if ((posicionActual != ultimaPosicion) && (elemento != 0))
    {
        // Move only when the neighbouring cell in the move direction is free.
        int casillaVecina = getElemento(d_matrizEntrada, fila + *despVertical, columna + *despHorizontal, d_dimMatriz);
        if (casillaVecina == 0)
        {
            // Shift one step in the move direction.
            setElemento(d_matrizSalida, fila + *despVertical, columna + *despHorizontal, elemento, d_dimMatriz);
        }
        else
        {
            // Blocked: keep the value where it is.
            setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz);
        }
    }
    // Edge cell with a value: stays where it is.
    else if (elemento != 0)
    {
        setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz);
    }
    // Zero cells write nothing: d_matrizSalida already contains zeros.
}
// -------- KERNELS COMPROBADORES ---------- //
__global__ void kernelComprobarIguales(int *d_matrizUno, int *d_matrizDos, bool* d_sonIguales, dimensionesMatriz* d_dimMatriz)
/*
Clears *d_sonIguales (to false) if the two matrices differ anywhere.
The caller must initialise the flag to true; threads only ever clear it,
so no atomics are needed.
*/
{
    // Cell compared by this thread.
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    // Compare the same position in both matrices.
    bool distintos = getElemento(d_matrizUno, fila, columna, d_dimMatriz)
                  != getElemento(d_matrizDos, fila, columna, d_dimMatriz);
    if (distintos)
        *d_sonIguales = false;
    // Block-wide barrier (reached by every thread of the block).
    __syncthreads();
}
__global__ void kernelComprobarLlena(int *d_matriz, bool* d_estaLlena, dimensionesMatriz* d_dimMatriz)
/*
Clears *d_estaLlena (to false) if any cell of the matrix is 0, i.e. the
board is not full.  The caller must initialise the flag to true; threads
only ever clear it, so no atomics are needed.
*/
{
    // Cell inspected by this thread.
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    // An empty cell means the board is not full.
    if (getElemento(d_matriz, fila, columna, d_dimMatriz) == 0)
        *d_estaLlena = false;
    // Block-wide barrier (reached by every thread of the block).
    __syncthreads();
}
__global__ void kernelComprobarSiHaGanado(int *d_matriz, bool* d_haGanado, dimensionesMatriz* d_dimMatriz)
/*
Sets *d_haGanado to true if any cell of the matrix holds the winning value
OBJETIVO.  The caller must initialise the flag to false; threads only ever
set it, so no atomics are needed.
(The previous comment here was copy-pasted from kernelComprobarLlena and
described the wrong kernel.)
*/
{
// Cell inspected by this thread:
int fila = blockIdx.y * blockDim.y + threadIdx.y;
int columna = blockIdx.x * blockDim.x + threadIdx.x;
// Value at that cell:
int elemento = getElemento(d_matriz, fila, columna, d_dimMatriz);
if (elemento == OBJETIVO)
*d_haGanado = true;
// Block-wide barrier, reached by every thread (no shared memory involved):
__syncthreads();
}
__global__ void kernelComprobarMovimientosPosibles(int *d_matriz, bool *seguirJugando, dimensionesMatriz* d_dimMatriz)
/*
Sets *seguirJugando to true when at least one pair of equal neighbouring
cells exists, i.e. a merge move is still possible.  The caller must
initialise the flag to false; threads only ever set it, so no atomics are
required.

FIX: the row edge tests (and the column edge tests) were chained with
`else if`, so on a one-row or one-column board a cell sitting on both
opposite edges still probed the out-of-range neighbour.  The four tests
are now independent.
*/
{
    // Cell owned by this thread and its value.
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    int elemento = getElemento(d_matriz, fila, columna, d_dimMatriz);
    // Which neighbours actually exist (stay inside the board).
    bool comprobarArr = (fila != 0);
    bool comprobarAb = (fila != d_dimMatriz->numFilas - 1);
    bool comprobarIzq = (columna != 0);
    bool comprobarDer = (columna != d_dimMatriz->numColumnas - 1);
    // True when any existing neighbour holds the same value as this cell.
    bool combinable = false;
    if (comprobarArr && getElemento(d_matriz, fila - 1, columna, d_dimMatriz) == elemento)
        combinable = true;
    if (comprobarAb && getElemento(d_matriz, fila + 1, columna, d_dimMatriz) == elemento)
        combinable = true;
    if (comprobarDer && getElemento(d_matriz, fila, columna + 1, d_dimMatriz) == elemento)
        combinable = true;
    if (comprobarIzq && getElemento(d_matriz, fila, columna - 1, d_dimMatriz) == elemento)
        combinable = true;
    // Only write when a move was found, so the caller's `false` survives.
    if (combinable)
        *seguirJugando = combinable;
}
// -------- FUNCIONES AUX HOST ----------- //
__host__ void leerParametros(int argc, const char* argv[])
/*
Parses the command-line arguments and configures the global game state:
board dimensions (dimMatriz, bytesMatriz), launch configuration
(dimGrid/dimBlock), play mode (automatico) and difficulty (modoDiablo).
Exits with code 1 on bad usage and code 2 on a non-square board.
*/
{
    // Reject bad usage up front; short-circuit keeps argv accesses safe
    // when fewer than 5 arguments were given.
    if ((argc != 5) || ((argv[1][0] != 'a') && (argv[1][0] != 'm')) || ((argv[2][0] != 'f') && (argv[2][0] != 'd')))
    {
        cout << "Error en la introduccion de parametros, los parametros son:\nautomatico/manual (a/m), facil/dificil (f/d), num_filas, num_columnas\n\nUso = nombreprograma a/m f/d num_filas num_columnas\n" << endl;
        exit(1);
    }
    // Board dimensions (must be square).
    dimMatriz.numFilas = atoi(argv[3]);
    dimMatriz.numColumnas = atoi(argv[4]);
    if (dimMatriz.numFilas != dimMatriz.numColumnas)
    {
        cout << "El numero de filas y de columnas no puede ser distinto, crack." << endl;
        exit(2);
    }
    bytesMatriz = atoi(argv[3]) * atoi(argv[4]) * sizeof(int);
    // Launch configuration: a 2x2 grid of quarter-board blocks when the
    // side is even, otherwise a single block covering the whole board.
    if (dimMatriz.numFilas % 2 == 0)
    {
        dimGrid = dim3(2, 2);
        dimBlock = dim3(dimMatriz.numFilas / 2, dimMatriz.numColumnas / 2);
    }
    else
    {
        dimGrid = dim3(1, 1);
        dimBlock = dim3(dimMatriz.numFilas, dimMatriz.numColumnas);
    }
    // Play mode and difficulty flags.
    automatico = (argv[1][0] == 'a');
    modoDiablo = (argv[2][0] != 'f');
}
__host__ void pintarMatriz(int *h_matriz)
/*
Draws the given host matrix on the console, framed by "+-------" borders.
Uses the Windows console API (GetStdHandle / SetConsoleTextAttribute) to
colour the border (attribute 14) and restore the text attribute (15) at
the end, so this function is Windows-only.
*/
{
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
// Top border.
for (size_t i = 0; i < dimMatriz.numColumnas; i++)
{
SetConsoleTextAttribute(hConsole, 14);
cout << ("+-------");
}
cout << "+" << endl;
// Matrix body, row by row.
for (int i = 0; i < dimMatriz.numColumnas; i++)
{
for (int j = 0; j < dimMatriz.numFilas; j++)
{
// A tab is appended only while the value has fewer than six digits
// (for non-negative values, v % 100000 == v  <=>  v < 100000),
// which keeps the columns visually aligned.
printf("[%d%s]", *(h_matriz + i * dimMatriz.numColumnas + j),
*(h_matriz + i * dimMatriz.numColumnas + j) % 100000 == *(h_matriz + i * dimMatriz.numColumnas + j) ? "\t" : "");
}
printf("\n");
}
// Bottom border.
for (size_t i = 0; i < dimMatriz.numColumnas; i++)
{
cout << ("+-------");
}
cout << "+" << endl;
SetConsoleTextAttribute(hConsole, 15);
}
__host__ void caracteristicasTarjeta()
/*
Prints, for every CUDA device present in the machine: its name, memory
clock rate, memory bus width, and the derived theoretical memory
bandwidth in GB/s.
*/
{
    // How many CUDA devices are available.
    int numTarjetas;
    cudaGetDeviceCount(&numTarjetas);
    // Query and print each device's properties.
    for (int dispositivo = 0; dispositivo < numTarjetas; ++dispositivo) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dispositivo);
        printf("Numero de dispositivo: %d\n", dispositivo);
        printf(" Nombre del dispositivo: %s\n", props.name);
        printf(" Frecuencia del reloj de memoria (KHz): %d\n",
        props.memoryClockRate);
        printf(" Interfaz de memoria (bits): %d\n",
        props.memoryBusWidth);
        // Bandwidth = 2 (DDR) * clock * bus-width-in-bytes, scaled to GB/s.
        printf(" Ancho de banda de memoria (GB/s): %f\n",
        2.0 * props.memoryClockRate * (props.memoryBusWidth / 8) / 1.0e6);
    }
}
// ------- OP. CON MATRIZ EN HOST ------- //
__host__ void inicializarMatriz(int *h_matriz)
/*
Seeds the (all-zero) host matrix with random starting values.  In
"diablo" (hard) mode up to 8 seeds drawn from {2,4} are placed; otherwise
up to 15 seeds drawn from {2,4,8}.  The cap is also limited by the board
size.  Random positions are retried until an empty (zero) cell is found.

Refactor: the original duplicated the whole loop in both difficulty
branches; the two branches differed only in the candidate values and the
seed cap, so they were unified.  The sequence of rand() calls — and hence
the produced boards for a given srand seed — is unchanged.

NOTE(review): the position formula (rand()%numColumnas)*numColumnas +
(rand()%numFilas) mixes rows and columns; it is only correct because
leerParametros enforces a square board.
*/
{
    srand(time(NULL));
    // Candidate values and limits per difficulty.
    const int candidatos[] = { 2, 4, 8 };
    const int numCandidatos = modoDiablo ? 2 : 3;   // hard mode: only 2s and 4s
    const int topeSemillas = modoDiablo ? 8 : 15;   // hard mode: fewer seeds
    // Number of seeds placed so far.
    int contadorSemillas = 0;
    int *posicionAleatoria;
    // Keep placing seeds until the cap (or the board capacity) is reached.
    while ((contadorSemillas < topeSemillas) && (contadorSemillas < dimMatriz.numFilas * dimMatriz.numColumnas))
    {
        // Pick a random cell; only still-empty cells accept a seed.
        posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas);
        if (*posicionAleatoria == 0)
        {
            *posicionAleatoria = candidatos[rand() % numCandidatos];
            contadorSemillas++;
        }
    }
}
__host__ void rellenarMatrizconcero(int *h_matriz)
/*
Zeroes every cell of the host matrix.
*/
{
    // The board is stored contiguously, so a single flat pass suffices.
    int totalCeldas = dimMatriz.numColumnas * dimMatriz.numFilas;
    for (int idx = 0; idx < totalCeldas; ++idx)
        h_matriz[idx] = 0;
}
__host__ void copiarMatriz(int *h_matrizCopia, int *h_matrizPega)
/*
Copies h_matrizCopia into h_matrizPega and then ZEROES h_matrizCopia —
both side effects are visible to the caller.  The work is done on the
device: both host matrices are uploaded, kernelCopiarMatriz duplicates the
source, kernelSetMatrizCeros clears it, and both results are copied back.
NOTE(review): no cuda* return code is checked, and device buffers are
allocated and freed on every call.
*/
{
// Device-side pointers:
int *d_matrizCopia;
int *d_matrizPega;
dimensionesMatriz* d_dimMatriz;
// Allocate device memory:
cudaMalloc((void **)&d_matrizCopia, bytesMatriz);
cudaMalloc((void **)&d_matrizPega, bytesMatriz);
cudaMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz));
// Upload both matrices and the dimensions from HOST to DEVICE:
cudaMemcpy(d_matrizCopia, h_matrizCopia, bytesMatriz, cudaMemcpyHostToDevice);
cudaMemcpy(d_matrizPega, h_matrizPega, bytesMatriz, cudaMemcpyHostToDevice);
cudaMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice);
// First, copy source into destination:
kernelCopiarMatriz << < dimGrid, dimBlock >> > (d_matrizCopia, d_matrizPega, d_dimMatriz);
cudaDeviceSynchronize();
// Then clear the source matrix:
kernelSetMatrizCeros << < dimGrid, dimBlock >> > (d_matrizCopia, d_dimMatriz);
cudaDeviceSynchronize();
// Download both results from DEVICE back to HOST:
cudaMemcpy(h_matrizPega, d_matrizPega, bytesMatriz, cudaMemcpyDeviceToHost);
cudaMemcpy(h_matrizCopia, d_matrizCopia, bytesMatriz, cudaMemcpyDeviceToHost);
// Release device memory:
cudaFree(d_matrizPega);
cudaFree(d_matrizCopia);
cudaFree(d_dimMatriz);
}
__host__ bool desplazarMatriz(int *h_matrizEntrada, int *h_matrizSalida, int* d_matrizEntrada, int* d_matrizSalida, int* h_puntuacion, int despVertical, int despHorizontal)
/*
Performs one game move.  WORK IN PROGRESS:
- the real move pipeline (iterative shift until stable, board checks,
  new seed) is the large commented-out region below;
- the code that actually runs only uploads the state, launches the
  experimental kernelSumaYDesplazaDerecha, copies the score back, pauses
  the console, and unconditionally returns true ("can keep playing") —
  despVertical/despHorizontal are uploaded but otherwise unused on this
  path.
NOTE(review): on the active path d_despVertical, d_despHorizontal,
d_puntuacion, d_dimMatriz and d_iguales are never cudaFree'd (the frees
live inside the commented-out region), so device memory leaks each call.
*/
{
int* d_despVertical = 0;
int* d_despHorizontal = 0;
int* d_puntuacion = 0;
dimensionesMatriz* d_dimMatriz;
// Allocate device memory:
cudaMalloc((void **)&d_despVertical, sizeof(int));
cudaMalloc((void **)&d_despHorizontal, sizeof(int));
cudaMalloc((void **)&d_puntuacion, sizeof(int));
cudaMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz));
// Upload matrices and scalars from HOST to DEVICE:
cudaMemcpy(d_matrizEntrada, h_matrizEntrada, bytesMatriz, cudaMemcpyHostToDevice);
cudaMemcpy(d_matrizSalida, h_matrizSalida, bytesMatriz, cudaMemcpyHostToDevice);
cudaMemcpy(d_puntuacion, h_puntuacion, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice);
cudaMemcpy(d_despVertical, &despVertical, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_despHorizontal, &despHorizontal, sizeof(int), cudaMemcpyHostToDevice);
// Launch the experimental merge kernel (the stable kernelSuma launch is
// kept commented out).  Dynamic shared memory: one int per board cell.
//kernelSuma << < dimGrid, dimBlock >> > (d_matrizEntrada, d_matrizSalida, d_puntuacion, d_despVertical, d_despHorizontal, d_dimMatriz);
kernelSumaYDesplazaDerecha << < dimGrid, dimBlock, dimMatriz.numFilas * dimMatriz.numColumnas * sizeof(int) >> > (d_matrizEntrada, d_matrizSalida, d_puntuacion, d_dimMatriz);
// Wait for the kernel to finish:
cudaDeviceSynchronize();
cudaMemcpy(h_puntuacion, d_puntuacion, sizeof(int), cudaMemcpyDeviceToHost);
// Flag used by the (disabled) shift loop below.
bool h_iguales = true;
bool *d_iguales;
cudaMalloc((void **)&d_iguales, sizeof(bool));
/*
// Mientras la matriz de entrada sea distinta de salida,
// significa que puedo seguir desplazando.
// Cuando sean iguales, detengo el bucle.
do
{
// Primero, copio salida a entrada.
kernelCopiarMatriz << < dimGrid, dimBlock >> > (d_matrizSalida, d_matrizEntrada, d_dimMatriz);
cudaDeviceSynchronize();
// Segundo, seteo salida a 0.
kernelSetMatrizCeros << < dimGrid, dimBlock >> > (d_matrizSalida, d_dimMatriz);
cudaDeviceSynchronize();
// Desplazo
kernelDesplazar << < dimGrid, dimBlock >> > (d_matrizEntrada, d_matrizSalida, d_despVertical, d_despHorizontal, d_dimMatriz);
cudaDeviceSynchronize();
// Compruebo si tengo que seguir desplazando.
// Doy por hecho que son iguales. Si no lo son, desplazare.
h_iguales = true;
// Muevo a device.
cudaMemcpy(d_iguales, &h_iguales, sizeof(bool), cudaMemcpyHostToDevice);
// Veo si son iguales.
kernelComprobarIguales << < dimGrid, dimBlock >> > (d_matrizSalida, d_matrizEntrada, d_iguales, d_dimMatriz);
cudaDeviceSynchronize();
// Limpio memoria tras trastear con d_iguales.
cudaMemcpy(&h_iguales, d_iguales, sizeof(bool), cudaMemcpyDeviceToHost);
} while (!h_iguales);
cudaFree(d_iguales);
// Compruebo si la matriz está llena y si se puede mover en cualq. dirección
bool h_movimientosPosibles = true;
// Devolvemos resultado de DEVICE a HOST:
cudaMemcpy(h_matrizSalida, d_matrizSalida, bytesMatriz, cudaMemcpyDeviceToHost);
// Si esta llena compruebo si hay movimientos posibles
if (estaLlena(d_matrizSalida))
h_movimientosPosibles = movimientosPosibles(d_matrizSalida);
// Si no, añado una nueva semilla a la matriz resultante en host
else {
nuevaSemilla(h_matrizSalida, 1); // Añadimos la nueva semilla
// Comprobamos si con la nueva semilla anadida, hemos perdido
cudaMemcpy(d_matrizSalida, h_matrizSalida, bytesMatriz, cudaMemcpyHostToDevice);
if (estaLlena(d_matrizSalida))
h_movimientosPosibles = movimientosPosibles(d_matrizSalida);
}
// Libero memoria de DEVICE:
cudaFree(d_despVertical);
cudaFree(d_despHorizontal);
cudaFree(d_dimMatriz);
return h_movimientosPosibles;
*/
system("pause");
return true;
}
__global__ void kernelSumaYDesplazaDerecha(int *d_matrizEntrada, int *d_matrizSalida, int *d_puntuacion, dimensionesMatriz* d_dimMatriz)
/*
EXPERIMENTAL / WORK IN PROGRESS shared-memory rewrite of kernelSuma for
the "shift right" move.  As written, the kernel's only net effect on the
board is the final setElemento call, which writes each cell's INPUT value
back unchanged; the merge logic only updates *d_puntuacion (and prints a
debug trace).  The actual shift phase is commented out at the bottom.

NOTE(review): known problems in this draft, left unfixed on purpose:
- posicionActual is read in the branch condition below before ever being
  assigned (undefined behaviour);
- s_fila_ini is written with two different indexing schemes
  ([fila * numColumnas + columna] and then [columna]), so row 0 of the
  shared tile is clobbered, and with a multi-block grid later reads can
  touch shared cells no thread of this block ever wrote;
- there is no __syncthreads() between the shared-memory writes and the
  first read of s_fila_ini inside the loop;
- the __syncthreads() calls inside the divergent `if` branch are
  undefined behaviour when only part of the block takes that branch;
- the inner do/while loops index s_fila_ini[desplazamiento + 1] and can
  read past the row end or spin on uninitialised data.
*/
{
// Shared tile holding the input values (dynamic size: one int per cell,
// passed as the third launch parameter).
extern __shared__ int s_fila_ini[];//d_dimMatriz->numColumnas];
// Row after shifting (disabled):
//__shared__ int s_fila_fin[d_dimMatriz->numColumnas];
// Cell owned by this thread:
int fila = blockIdx.y * blockDim.y + threadIdx.y;
int columna = blockIdx.x * blockDim.x + threadIdx.x;
// Input value at that cell:
int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz);
s_fila_ini[fila * d_dimMatriz->numColumnas + columna] = elemento;
printf("[%d][%d] = %d\n", fila, columna, s_fila_ini[fila * d_dimMatriz->numColumnas + columna]);
// Scan state variables (posicionActual is NEVER assigned — see notes).
int ultimaPosicion, desplazamiento, posicionActual;
// Horizontal (rightward) move parameters:
desplazamiento = columna;
ultimaPosicion = d_dimMatriz->numColumnas - 1;
// Second, conflicting write into the shared tile (see notes above):
s_fila_ini[columna] = elemento;
//s_fila_fin[columna] = 0;
// Whether this cell's value will be doubled.
bool multiplicarem = false;
// BUG: posicionActual is uninitialised here.
if (elemento != 0 && posicionActual != ultimaPosicion)
{
// Parity of the run of equal values in the move direction.
int paridad = 1;
// Cell examined inside the loop.
int casilla;
desplazamiento = columna + 1;
// Scan until a different non-zero value or the board edge.
do {
// Cell under study (note the extra +1 on top of the +1 start).
casilla = s_fila_ini[desplazamiento + 1];
// A different non-zero value blocks the scan.
if (casilla != elemento && casilla != 0) { break; }
// Another equal value extends the run.
if (casilla == elemento) { paridad += 1; }
// Keep scanning.
desplazamiento += 1;
} while (desplazamiento != ultimaPosicion);
// An even run length means this cell merges.
if (paridad % 2 == 0)
{
multiplicarem = true;
}
// Divergent barrier (see notes).
__syncthreads();
// If merging, locate the partner cell and add the score.
if (multiplicarem)
{
// Position of the element to double.
int casilla;
desplazamiento = columna; // restart the scan
// Skip zeros until the matching value is found.
do {
desplazamiento += 1;
casilla = s_fila_ini[desplazamiento + 1];
} while (casilla != elemento);
// Atomic partial-score accumulation.
atomicAdd(d_puntuacion, elemento * 2);
// Writing the doubled value is disabled:
//s_fila_sum[desplazamiento] = elemento * 2;
}
// Otherwise this cell would keep its own value (disabled):
else
{
//s_fila_sum[desplazamiento] = elemento;
}
// Divergent barrier (see notes).
__syncthreads();
}
else
{
//s_fila_sum[columna] = elemento;
}
// Barrier reached by every thread.
__syncthreads();
// Net effect: output equals input for every cell.
setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz);
/*
// A PARTIR DE AHORA ES DESPLAZAMIENTO
// Veces que me tendré que desplazar:
desplazamiento = columna;
// Veces que tendré que desplazarme
int cuantosCeros = 0;
int casilla;
do {
casilla = s_fila_ini[desplazamiento + 1];
// Si hay otro elemento igual encima, aumento paridad
if (casilla == 0) { cuantosCeros += 1; }
// Y sigo viendo
desplazamiento += 1;
} while (desplazamiento != ultimaPosicion);
// una vez sé cuántas veces debo desplazarme, desplazo.
//s_fila_fin[columna + cuantosCeros] = s_fila_sum[columna]
setElemento(d_matrizSalida, fila, columna + cuantosCeros, elemento, d_dimMatriz);
*/
}
__host__ void setElementoHost(int *h_matriz, int fila, int columna, int elemento)
/*
Writes `elemento` at [fila][columna] of the row-major host matrix.
*/
{
    int indice = fila * dimMatriz.numColumnas + columna;
    h_matriz[indice] = elemento;
}
__host__ void nuevaSemilla(int *h_matriz, int numSemillas)
/*
Places `numSemillas` new random seeds into empty (zero) cells of the host
matrix.  In "diablo" (hard) mode seeds are drawn from {2,4}, otherwise
from {2,4,8}.  Random positions are retried until an empty cell is found.

FIX: despite its name and comment, the original stopped after the FIRST
seed regardless of numSemillas (a semillaGenerada flag short-circuited the
loop).  The loop now honours the count; behaviour for the existing call
with numSemillas == 1 is unchanged, including the rand() call sequence.

NOTE(review): as in the original, the loop retries forever if no zero
cell exists; callers are expected to check estaLlena() first (the active
caller in desplazarMatriz's disabled pipeline does).
*/
{
    // Candidate values per difficulty.
    const int candidatos[] = { 2, 4, 8 };
    const int numCandidatos = modoDiablo ? 2 : 3;   // hard mode: only 2s and 4s
    int *posicionAleatoria;
    while (numSemillas > 0)
    {
        // Pick a random cell; only a still-empty cell accepts the seed.
        posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas);
        if (*posicionAleatoria == 0)
        {
            *posicionAleatoria = candidatos[rand() % numCandidatos];
            numSemillas--;
        }
    }
}
// ------- COMPROBACIONES EN HOST ------- //
__host__ bool estaLlena(int* d_matriz)
/*
Returns true when the device matrix contains no zero cell (board full).
Uploads a true flag plus the dimensions, lets kernelComprobarLlena clear
the flag if any empty cell exists, and reads the flag back.
*/
{
    bool llena = true;
    bool *d_llena;
    dimensionesMatriz *d_dims;
    // Device-side flag and dimensions.
    cudaMalloc((void **)&d_llena, sizeof(bool));
    cudaMalloc((void **)&d_dims, sizeof(dimensionesMatriz));
    cudaMemcpy(d_llena, &llena, sizeof(bool), cudaMemcpyHostToDevice);
    cudaMemcpy(d_dims, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice);
    // Any zero cell clears the flag.
    kernelComprobarLlena << < dimGrid, dimBlock >> > (d_matriz, d_llena, d_dims);
    cudaDeviceSynchronize();
    cudaMemcpy(&llena, d_llena, sizeof(bool), cudaMemcpyDeviceToHost);
    // Release scratch device memory.
    cudaFree(d_llena);
    cudaFree(d_dims);
    return llena;
}
__host__ bool finJuego(int* d_matriz)
/*
Returns true when the device matrix contains the winning value (OBJETIVO).
Uploads a false flag plus the dimensions, lets kernelComprobarSiHaGanado
set the flag if the target value is present, and reads the flag back.
*/
{
    bool ganado = false;
    bool *d_ganado;
    dimensionesMatriz *d_dims;
    // Device-side flag and dimensions.
    cudaMalloc((void **)&d_ganado, sizeof(bool));
    cudaMalloc((void **)&d_dims, sizeof(dimensionesMatriz));
    cudaMemcpy(d_ganado, &ganado, sizeof(bool), cudaMemcpyHostToDevice);
    cudaMemcpy(d_dims, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice);
    // Any cell equal to OBJETIVO sets the flag.
    kernelComprobarSiHaGanado << < dimGrid, dimBlock >> > (d_matriz, d_ganado, d_dims);
    cudaDeviceSynchronize();
    cudaMemcpy(&ganado, d_ganado, sizeof(bool), cudaMemcpyDeviceToHost);
    // Release scratch device memory.
    cudaFree(d_ganado);
    cudaFree(d_dims);
    return ganado;
}
__host__ bool movimientosPosibles(int* d_matriz)
/*
Returns true when at least one merge move is still possible on the device
matrix.  Uploads a false flag plus the dimensions, lets
kernelComprobarMovimientosPosibles set the flag when equal neighbours
exist, and reads the flag back.
*/
{
    bool hayMovimientos = false;
    bool *d_hayMovimientos;
    dimensionesMatriz *d_dims;
    // Device-side flag and dimensions.
    cudaMalloc((void **)&d_hayMovimientos, sizeof(bool));
    cudaMalloc((void **)&d_dims, sizeof(dimensionesMatriz));
    cudaMemcpy(d_hayMovimientos, &hayMovimientos, sizeof(bool), cudaMemcpyHostToDevice);
    cudaMemcpy(d_dims, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice);
    // Any pair of equal neighbours sets the flag.
    kernelComprobarMovimientosPosibles << < dimGrid, dimBlock >> > (d_matriz, d_hayMovimientos, d_dims);
    cudaDeviceSynchronize();
    // Bring the flag home and release scratch device memory.
    cudaMemcpy(&hayMovimientos, d_hayMovimientos, sizeof(bool), cudaMemcpyDeviceToHost);
    cudaFree(d_dims);
    cudaFree(d_hayMovimientos);
    return hayMovimientos;
}
// ----- GUARDADO Y LECTURA ----- //
__host__ void escribirMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos)
/*
Saves the current game to "<nombreJugador>.txt": rows, columns, lives,
the two counters, then the matrix contents row by row.

FIXES: fclose() is no longer called on a NULL handle when fopen fails
(undefined behaviour), and the manually new[]-ed filename buffer — which
leaked — was replaced by string::c_str().

NOTE(review): the call site passes (&movimientos, &puntuacion), i.e.
swapped with respect to these parameter names; leerMatriz reads the two
counters back in the same file order, so the round trip is consistent,
but the naming is misleading — confirm before renaming anything.
*/
{
    // Build the file name and open for writing.
    nombreJugador += ".txt";
    FILE *archivo = fopen(nombreJugador.c_str(), "w");
    if (archivo == NULL)
    {
        cout << "Error escribiendo partida. " << endl;
        return; // nothing to close: fclose(NULL) was UB in the original
    }
    // Header: dimensions, lives and counters.
    fprintf(archivo, "%d\n", dimMatriz.numFilas);
    fprintf(archivo, "%d\n", dimMatriz.numColumnas);
    fprintf(archivo, "%d\n", vidas);
    fprintf(archivo, "%d\n", *movimientos);
    fprintf(archivo, "%d\n", *puntuacion);
    // Matrix body, one row per line.
    for (int i = 0; i < dimMatriz.numColumnas; ++i) {
        for (int j = 0; j < dimMatriz.numFilas; ++j) {
            fprintf(archivo, "%d ", *(h_matriz + i * dimMatriz.numColumnas + j));
        }
        fprintf(archivo, "\n");
    }
    fclose(archivo);
}
__host__ bool leerMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos)
/*
Loads a saved game from "<nombreJugador>.txt" into the host matrix, the
global `vidas`, and the two counters.  Returns true when the file existed
and its dimensions match the current board; otherwise prints a message
and returns false so the caller starts a fresh game.

FIX: typo in the user-facing error message ("Erro" -> "Error").

NOTE(review): the two counters are read in the order escribirMatriz wrote
them, so the round trip is consistent despite the misleading parameter
names — confirm against the caller before renaming.
*/
{
    // Open the save file.
    ifstream in(nombreJugador + ".txt");
    bool lecturaCorrecta = true;
    if (!in)
    {
        // Missing save: fall back to a new game.
        cout << "Error abriendo el archivo. La partida no existe, se iniciara una partida nueva." << endl;
        lecturaCorrecta = false;
    }
    else
    {
        // Header: dimensions and lives.
        int a_filas, a_columnas;
        in >> a_filas;
        in >> a_columnas;
        in >> vidas;
        if (a_filas != dimMatriz.numFilas || a_columnas != dimMatriz.numColumnas)
        {
            // Saved board does not match the requested dimensions.
            cout << "La partida cargada no es congruente con el numero de filas/columnas pasada como parametro." << endl;
            cout << "Se iniciara una partida nueva." << endl;
            lecturaCorrecta = false;
        }
        else
        {
            // Counters, then the matrix contents cell by cell.
            in >> *movimientos;
            in >> *puntuacion;
            for (int fila = 0; fila < dimMatriz.numFilas; fila++)
            {
                for (int columna = 0; columna < dimMatriz.numColumnas; columna++)
                {
                    int num;
                    in >> num;
                    setElementoHost(h_matriz, fila, columna, num);
                }
            }
        }
    }
    // Closing an unopened ifstream is harmless.
    in.close();
    return lecturaCorrecta;
}
|
b37b0c640bf2dabb1242b6a2a640098ba47f40b9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Inverse Discrete Cosine Transform in Column wise (DCT one)
* DCT_I_Column_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_I_Column_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_I_Column_Inverse.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a HIP error with its call site and aborts by default, so failures
// are never silently ignored. `file` is const char*: the gpuErrchk macro
// passes the __FILE__ string literal, and binding a string literal to a
// non-const char* is ill-formed in modern C++.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
// Inverse DCT-I along columns: computes C = T * A, where T is the
// numARows x numARows inverse DCT-I basis generated on the fly (cosine times
// normalisation weights) and A is the numARows x numAColumns input.
// Classic TILE_DIM x TILE_DIM shared-memory tiled multiply; one thread per
// output element. NOTE(review): the cosine term divides by (numARows - 1),
// so this assumes numARows > 1 -- confirm callers never pass a single row.
__global__ void DCTI_Column_Inverse_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
// As caches a tile of the generated basis T; Bs caches a tile of input A.
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
// Generate T[Row][k*TILE_DIM + tx] on the fly instead of loading it.
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = cos((Row*PI_d*(threadIdx.x + (k*TILE_DIM)) / (numARows - 1)))*sqrt(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1) + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)))*sqrt(1.0 / (1 + DELTA(1, Row + 1) + DELTA(numARows, Row + 1)))*sqrt(2.0 / numARows); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
// Barrier: the whole tile must be staged before any thread consumes it.
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
// Barrier: finish with this tile before the next iteration overwrites it.
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Host-side wrapper for the column-wise inverse DCT-I.
// Copies the numARows x numAColumns input A to the device, launches the
// tiled kernel over the (numCRows = numARows) x (numCColumns = numAColumns)
// output, and copies the result back into C. All API calls are checked via
// gpuErrchk. NOTE(review): numCRows/numCColumns are overwritten locally;
// they are passed by value, so the caller never sees the update.
extern "C" void CalculateTransformDCTInverseColumnOne(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//float * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
// One thread per output element, grid rounded up to whole tiles.
dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC; // no-op: reassigning a by-value parameter has no caller-visible effect
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
| b37b0c640bf2dabb1242b6a2a640098ba47f40b9.cu | /*
* Inverse Discrete Cosine Transform in Column wise (DCT one)
* DCT_I_Column_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_I_Column_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_I_Column_Inverse.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with its call site and aborts by default, so failures
// are never silently ignored. `file` is const char*: the gpuErrchk macro
// passes the __FILE__ string literal, and binding a string literal to a
// non-const char* is ill-formed in modern C++.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
// Inverse DCT-I along columns: computes C = T * A, where T is the
// numARows x numARows inverse DCT-I basis generated on the fly (cosine times
// normalisation weights) and A is the numARows x numAColumns input.
// Classic TILE_DIM x TILE_DIM shared-memory tiled multiply; one thread per
// output element. NOTE(review): the cosine term divides by (numARows - 1),
// so this assumes numARows > 1 -- confirm callers never pass a single row.
__global__ void DCTI_Column_Inverse_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
// As caches a tile of the generated basis T; Bs caches a tile of input A.
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
// Generate T[Row][k*TILE_DIM + tx] on the fly instead of loading it.
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = cos((Row*PI_d*(threadIdx.x + (k*TILE_DIM)) / (numARows - 1)))*sqrt(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1) + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)))*sqrt(1.0 / (1 + DELTA(1, Row + 1) + DELTA(numARows, Row + 1)))*sqrt(2.0 / numARows); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
// Barrier: the whole tile must be staged before any thread consumes it.
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
// Barrier: finish with this tile before the next iteration overwrites it.
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Host-side wrapper for the column-wise inverse DCT-I.
// Copies the numARows x numAColumns input A to the device, launches the
// tiled kernel over the (numCRows = numARows) x (numCColumns = numAColumns)
// output, and copies the result back into C. All API calls are checked via
// gpuErrchk. NOTE(review): numCRows/numCColumns are overwritten locally;
// they are passed by value, so the caller never sees the update.
extern "C" void CalculateTransformDCTInverseColumnOne(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//float * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
// One thread per output element, grid rounded up to whole tiles.
dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC; // no-op: reassigning a by-value parameter has no caller-visible effect
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
|
4aa25ac32124014aec4aa5fc603bd67c55283e23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
* Perfom a reduction from data of length 'size' to result, where length of result will be 'number of blocks'.
*/
extern "C"
// Element-wise fused multiply-add: result[i] = a[i] + b[i] * c[i] for i < n.
// One thread per element; threads past the end of the arrays exit early.
__global__ void addProduct(int n, float *a, float *b, float *c, float *result)
{
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= n)
return;
result[idx] = a[idx] + b[idx] * c[idx];
}
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
* Perfom a reduction from data of length 'size' to result, where length of result will be 'number of blocks'.
*/
extern "C"
// Element-wise fused multiply-add: result[i] = a[i] + b[i] * c[i] for i < n.
// One thread per element; threads past the end of the arrays exit early.
__global__ void addProduct(int n, float *a, float *b, float *c, float *result)
{
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= n)
return;
result[idx] = a[idx] + b[idx] * c[idx];
}
b1d86ff9eafff15e643bd3e3bebc5e22ccc8a9af.hip | // !!! This is a file automatically generated by hipify!!!
#include <agency/agency.hpp>
#include <agency/cuda.hpp>
#include <cassert>
#include <algorithm>
#include <iostream>
#include <mutex>
#include <thread>
// Device-visible accumulator used by `functor` below (managed/unified memory).
__managed__ int result;
// Factory functors used as shared-parameter initialisers in the
// bulk_then_execute calls in main(); each simply returns a constant.
struct make_7
{
__host__ __device__
int operator()() const
{
return 7;
}
};
struct make_42
{
__host__ __device__
int operator()() const
{
return 42;
}
};
struct make_1
{
__host__ __device__
int operator()() const
{
return 1;
}
};
// Accumulates the sum of all four inputs into *result, one atomic add per
// invocation.
struct functor
{
int *result;
template<class Index>
__device__
void operator()(const Index&, int& past, int& outer_shared, int& inner_shared, int& inner_inner_shared)
{
atomicAdd(result, past + outer_shared + inner_shared + inner_inner_shared);
}
};
// Smoke test for agency's executor_array and flattened_executor over CUDA
// grid executors: spreads agents across all visible devices and checks that
// the predecessor value (13) and the shared parameters (7, 42, 1) reach
// every agent.
int main()
{
using namespace agency;
using outer_executor_type = cuda::this_thread::parallel_executor;
using inner_executor_type = cuda::grid_executor;
int num_devices = 0;
hipError_t error = hipGetDeviceCount(&num_devices);
if(error)
{
std::string what("CUDA error after hipGetDeviceCount(): ");
what += std::string(hipGetErrorString(error));
throw std::runtime_error(what);
}
{
// test executor_array then_execute()
using executor_type = executor_array<inner_executor_type, outer_executor_type>;
using shape_type = executor_shape_t<executor_type>;
using index_type = executor_index_t<executor_type>;
using container_type = executor_container_t<executor_type,int>;
executor_type exec(num_devices);
// Bind one grid executor to each physical device.
for(size_t i = 0; i < exec.size(); ++i)
{
exec[i].device(i);
}
auto shape = exec.make_shape(exec.size(),{2,2});
auto past = agency::make_ready_future<int>(exec, 13);
auto f = exec.bulk_then_execute([=] __host__ __device__ (const index_type& idx, int& past, container_type& results, int& outer_shared, int& inner_shared, int& inner_inner_shared)
{
printf("hello from agent %d %d %d\n", (int)agency::detail::get<0>(idx), (int)agency::detail::get<1>(idx), (int)agency::detail::get<2>(idx));
results[idx] = past + outer_shared + inner_shared + inner_inner_shared;
},
shape,
past,
[=] __host__ __device__ { return container_type(shape); },
make_7(),
make_42(),
make_1()
);
auto results = f.get();
// Every agent must observe 13 (past) + 7 + 42 + 1 (shared factories).
assert(results.size() == agency::detail::index_space_size(shape));
assert(std::all_of(results.begin(), results.end(), [](int x){ return x == 13 + 7 + 42 + 1; }));
}
{
// test flattened executor_array
using executor_array_type = executor_array<inner_executor_type, outer_executor_type>;
using executor_type = flattened_executor<executor_array_type>;
using shape_type = executor_shape_t<executor_type>;
using index_type = executor_index_t<executor_type>;
using container_type = executor_container_t<executor_type,int>;
executor_array_type exec_array(num_devices);
for(size_t i = 0; i < exec_array.size(); ++i)
{
exec_array[i].device(i);
}
executor_type exec{exec_array};
shape_type shape{exec_array.size(), 2};
auto ready = agency::make_ready_future<void>(exec);
auto f = exec.bulk_then_execute([] __host__ __device__ (const index_type& idx, container_type& results, int& outer_shared, int& inner_shared)
{
results[idx] = 13 + outer_shared + inner_shared;
},
shape,
ready,
[=] __host__ __device__ { return container_type(shape); },
make_7(),
make_42()
);
auto results = f.get();
// Flattened case: two shared parameters only, so expect 13 + 7 + 42.
assert(results.size() == agency::detail::index_space_size(shape));
assert(std::all_of(results.begin(), results.end(), [](int x){ return x == 13 + 7 + 42; }));
}
std::cout << "OK" << std::endl;
return 0;
}
| b1d86ff9eafff15e643bd3e3bebc5e22ccc8a9af.cu | #include <agency/agency.hpp>
#include <agency/cuda.hpp>
#include <cassert>
#include <algorithm>
#include <iostream>
#include <mutex>
#include <thread>
// Device-visible accumulator used by `functor` below (managed/unified memory).
__managed__ int result;
// Factory functors used as shared-parameter initialisers in the
// bulk_then_execute calls in main(); each simply returns a constant.
struct make_7
{
__host__ __device__
int operator()() const
{
return 7;
}
};
struct make_42
{
__host__ __device__
int operator()() const
{
return 42;
}
};
struct make_1
{
__host__ __device__
int operator()() const
{
return 1;
}
};
// Accumulates the sum of all four inputs into *result, one atomic add per
// invocation.
struct functor
{
int *result;
template<class Index>
__device__
void operator()(const Index&, int& past, int& outer_shared, int& inner_shared, int& inner_inner_shared)
{
atomicAdd(result, past + outer_shared + inner_shared + inner_inner_shared);
}
};
// Smoke test for agency's executor_array and flattened_executor over CUDA
// grid executors: spreads agents across all visible devices and checks that
// the predecessor value (13) and the shared parameters (7, 42, 1) reach
// every agent.
int main()
{
using namespace agency;
using outer_executor_type = cuda::this_thread::parallel_executor;
using inner_executor_type = cuda::grid_executor;
int num_devices = 0;
cudaError_t error = cudaGetDeviceCount(&num_devices);
if(error)
{
std::string what("CUDA error after cudaGetDeviceCount(): ");
what += std::string(cudaGetErrorString(error));
throw std::runtime_error(what);
}
{
// test executor_array then_execute()
using executor_type = executor_array<inner_executor_type, outer_executor_type>;
using shape_type = executor_shape_t<executor_type>;
using index_type = executor_index_t<executor_type>;
using container_type = executor_container_t<executor_type,int>;
executor_type exec(num_devices);
// Bind one grid executor to each physical device.
for(size_t i = 0; i < exec.size(); ++i)
{
exec[i].device(i);
}
auto shape = exec.make_shape(exec.size(),{2,2});
auto past = agency::make_ready_future<int>(exec, 13);
auto f = exec.bulk_then_execute([=] __host__ __device__ (const index_type& idx, int& past, container_type& results, int& outer_shared, int& inner_shared, int& inner_inner_shared)
{
printf("hello from agent %d %d %d\n", (int)agency::detail::get<0>(idx), (int)agency::detail::get<1>(idx), (int)agency::detail::get<2>(idx));
results[idx] = past + outer_shared + inner_shared + inner_inner_shared;
},
shape,
past,
[=] __host__ __device__ { return container_type(shape); },
make_7(),
make_42(),
make_1()
);
auto results = f.get();
// Every agent must observe 13 (past) + 7 + 42 + 1 (shared factories).
assert(results.size() == agency::detail::index_space_size(shape));
assert(std::all_of(results.begin(), results.end(), [](int x){ return x == 13 + 7 + 42 + 1; }));
}
{
// test flattened executor_array
using executor_array_type = executor_array<inner_executor_type, outer_executor_type>;
using executor_type = flattened_executor<executor_array_type>;
using shape_type = executor_shape_t<executor_type>;
using index_type = executor_index_t<executor_type>;
using container_type = executor_container_t<executor_type,int>;
executor_array_type exec_array(num_devices);
for(size_t i = 0; i < exec_array.size(); ++i)
{
exec_array[i].device(i);
}
executor_type exec{exec_array};
shape_type shape{exec_array.size(), 2};
auto ready = agency::make_ready_future<void>(exec);
auto f = exec.bulk_then_execute([] __host__ __device__ (const index_type& idx, container_type& results, int& outer_shared, int& inner_shared)
{
results[idx] = 13 + outer_shared + inner_shared;
},
shape,
ready,
[=] __host__ __device__ { return container_type(shape); },
make_7(),
make_42()
);
auto results = f.get();
// Flattened case: two shared parameters only, so expect 13 + 7 + 42.
assert(results.size() == agency::detail::index_space_size(shape));
assert(std::all_of(results.begin(), results.end(), [](int x){ return x == 13 + 7 + 42; }));
}
std::cout << "OK" << std::endl;
return 0;
}
|
d6de8be8186b1fd2bcc0a8e188220ad8879f780a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define IDT(i,j) (i)*((i)+1)/2+(j)
// Lower-triangular grid: element (i,j) with j <= i lives at v[i*(i+1)/2 + j]
// (see the IDT macro above).
typedef struct{
double *v;
int dim;
int size;
} Grid;
// Sets the whole triangular grid to 0. The boundary/interior split is kept
// from the original even though both branches write the same value.
__global__ void cero(Grid m){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=m.dim-1 && j<=i){
if(j==0 || i==m.dim-1 || i==j)
m.v[IDT(i,j)]=0.0;
else
m.v[IDT(i,j)]=0.0;
}
}
// NOTE(review): despite its name ("one"), this kernel writes 0.0 in both
// branches and is byte-identical to cero(); the interior branch was very
// likely meant to write 1.0 -- confirm against callers before changing.
__global__ void uno(Grid m){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=m.dim-1 && j<=i){
if(j==0 || i==m.dim-1 || i==j)
m.v[IDT(i,j)]=0.0;
else
m.v[IDT(i,j)]=0.0;
}
}
// Right-hand-side init: 0.0 on the triangle boundary, 4.0 at interior points.
__global__ void inicializa_f(Grid m){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=m.dim-1 && j<=i)
{
if(j==0 || i==m.dim-1 || i==j)
m.v[IDT(i,j)]=0.0;
if(j>0 && j<i && i<m.dim-1)
m.v[IDT(i,j)]=4.0;
}
}
// NOTE(review): `i^2+j^2` parses as (i XOR 2) + (j XOR 2) -- `^` is bitwise
// XOR in C, not exponentiation. If squared radii (i*i + j*j) were intended,
// this is a bug; verify before relying on these values.
__global__ void random(Grid m){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=m.dim-1 && j<=i)
{
if(j==0 || i==m.dim-1 || i==j)
m.v[IDT(i,j)]=0.0;
if(j>0 && j<i && i<m.dim-1)
m.v[IDT(i,j)]=i^2+j^2;
}
}
// Three-colour relaxation sweep over interior points with (i+j)%3 == 0,
// solving for the centre value of the 9-point stencil `op` (op[4] is the
// centre coefficient and must be non-zero).
__global__ void suaviza_r(Grid u, Grid f, double * op)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=u.dim-1 && j<=i)
{
if(j>0 && j<i && i<u.dim-1)
if((i+j)%3==0)
{
u.v[IDT(i,j)]=(f.v[IDT(i,j)]-op[0]*u.v[IDT(i-1,j-1)]
-op[1]*u.v[IDT(i-1,j )]
-op[2]*u.v[IDT(i-1,j+1)]
-op[3]*u.v[IDT(i ,j-1)]
-op[5]*u.v[IDT(i ,j+1)]
-op[6]*u.v[IDT(i+1,j-1)]
-op[7]*u.v[IDT(i+1,j )]
-op[8]*u.v[IDT(i+1,j+1)])/op[4];
}
}
}
// Same relaxation sweep for the second colour class, (i+j)%3 == 1.
__global__ void suaviza_g(Grid u, Grid f, double * op)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=u.dim-1 && j<=i)
{
if(j>0 && j<i && i<u.dim-1)
if((i+j)%3==1)
{
u.v[IDT(i,j)]=(f.v[IDT(i,j)]-op[0]*u.v[IDT(i-1,j-1)]
-op[1]*u.v[IDT(i-1,j )]
-op[2]*u.v[IDT(i-1,j+1)]
-op[3]*u.v[IDT(i ,j-1)]
-op[5]*u.v[IDT(i ,j+1)]
-op[6]*u.v[IDT(i+1,j-1)]
-op[7]*u.v[IDT(i+1,j )]
-op[8]*u.v[IDT(i+1,j+1)])/op[4];
}
}
}
// Same relaxation sweep for the third colour class, (i+j)%3 == 2.
__global__ void suaviza_b(Grid u, Grid f, double * op)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=u.dim-1 && j<=i)
{
if(j>0 && j<i && i<u.dim-1)
if((i+j)%3==2)
{
u.v[IDT(i,j)]=(f.v[IDT(i,j)]-op[0]*u.v[IDT(i-1,j-1)]
-op[1]*u.v[IDT(i-1,j )]
-op[2]*u.v[IDT(i-1,j+1)]
-op[3]*u.v[IDT(i ,j-1)]
-op[5]*u.v[IDT(i ,j+1)]
-op[6]*u.v[IDT(i+1,j-1)]
-op[7]*u.v[IDT(i+1,j )]
-op[8]*u.v[IDT(i+1,j+1)])/op[4];
}
}
}
// Residual (defect) d = f - A*u at interior points, using the 9-point
// stencil `op`. NOTE(review): unlike the suaviza_* kernels, the op[0] term
// for neighbour (i-1,j-1) is NOT subtracted here -- verify whether that
// omission is intentional for this operator or a missing term.
__global__ void defecto(Grid u, Grid f, Grid d, double * op)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=u.dim-1 && j<=i)
{
if(j>0 && j<i && i<u.dim-1 && i>0) /* interior points */
{
d.v[IDT(i,j)]=f.v[IDT(i,j)]
-op[1]*u.v[IDT(i-1,j)]
-op[2]*u.v[IDT(i-1,j+1)]
-op[3]*u.v[IDT(i,j-1)]
-op[4]*u.v[IDT(i,j)]
-op[5]*u.v[IDT(i,j+1)]
-op[6]*u.v[IDT(i+1,j-1)]
-op[7]*u.v[IDT(i+1,j)]
-op[8]*u.v[IDT(i+1,j+1)];
}
}
}
// Restriction (fine -> coarse): each interior coarse point (i,j) takes the
// fine point (2i,2j) plus half of its six triangular neighbours, divided by 4.
__global__ void restringe(Grid sup, Grid inf)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(j>0 && j<i && i<inf.dim-1 && i>0) /* interior points of the coarse grid */
{
inf.v[IDT(i,j)]=(sup.v[IDT(2*i,2*j)]+0.5*(
sup.v[IDT(2*i-1,2*j-1)]
+sup.v[IDT(2*i-1,2*j )]
+sup.v[IDT(2*i ,2*j-1)]
+sup.v[IDT(2*i ,2*j+1)]
+sup.v[IDT(2*i+1,2*j )]
+sup.v[IDT(2*i+1,2*j+1)]))/4;
}
}
// Prolongation (coarse -> fine): copies coarse values to even fine points
// and averages neighbouring coarse values onto the in-between fine points.
// NOTE(review): the last line for fine point (2i,2j-1) averages with
// inf(i-1,j-1); by symmetry with the other lines inf(i,j-1) looks intended
// -- verify before changing.
__global__ void interpola(Grid inf, Grid sup)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=inf.dim-1 && j<=i)
{
if(j>0 && j<i && i<inf.dim-1 && i>0) /* interior points of the coarse grid */
{
sup.v[IDT(2*i ,2*j )] = inf.v[IDT(i,j)];
sup.v[IDT(2*i-1,2*j-1)] = (inf.v[IDT(i,j)]+inf.v[IDT(i-1,j-1)])/2;
sup.v[IDT(2*i-1,2*j )] = (inf.v[IDT(i,j)]+inf.v[IDT(i-1,j )])/2;
sup.v[IDT(2*i ,2*j+1)] = (inf.v[IDT(i,j)]+inf.v[IDT(i ,j+1)])/2;
sup.v[IDT(2*i+1,2*j+1)] = (inf.v[IDT(i,j)]+inf.v[IDT(i+1,j+1)])/2;
sup.v[IDT(2*i+1,2*j )] = (inf.v[IDT(i,j)]+inf.v[IDT(i+1,j )])/2;
sup.v[IDT(2*i ,2*j-1)] = (inf.v[IDT(i,j)]+inf.v[IDT(i-1,j-1)])/2;
}
}
}
// Element-wise accumulation u += v over the whole triangular grid
// (boundary included); used to apply the coarse-grid correction.
__global__ void suma(Grid u, Grid v)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<u.dim && j<=i)
{
u.v[IDT(i,j)]=u.v[IDT(i,j)]+v.v[IDT(i,j)];
}
}
| d6de8be8186b1fd2bcc0a8e188220ad8879f780a.cu | #define IDT(i,j) (i)*((i)+1)/2+(j)
// Lower-triangular grid: element (i,j) with j <= i lives at v[i*(i+1)/2 + j]
// (see the IDT macro above).
typedef struct{
double *v;
int dim;
int size;
} Grid;
// Sets the whole triangular grid to 0. The boundary/interior split is kept
// from the original even though both branches write the same value.
__global__ void cero(Grid m){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=m.dim-1 && j<=i){
if(j==0 || i==m.dim-1 || i==j)
m.v[IDT(i,j)]=0.0;
else
m.v[IDT(i,j)]=0.0;
}
}
// NOTE(review): despite its name ("one"), this kernel writes 0.0 in both
// branches and is byte-identical to cero(); the interior branch was very
// likely meant to write 1.0 -- confirm against callers before changing.
__global__ void uno(Grid m){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=m.dim-1 && j<=i){
if(j==0 || i==m.dim-1 || i==j)
m.v[IDT(i,j)]=0.0;
else
m.v[IDT(i,j)]=0.0;
}
}
// Right-hand-side init: 0.0 on the triangle boundary, 4.0 at interior points.
__global__ void inicializa_f(Grid m){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=m.dim-1 && j<=i)
{
if(j==0 || i==m.dim-1 || i==j)
m.v[IDT(i,j)]=0.0;
if(j>0 && j<i && i<m.dim-1)
m.v[IDT(i,j)]=4.0;
}
}
// NOTE(review): `i^2+j^2` parses as (i XOR 2) + (j XOR 2) -- `^` is bitwise
// XOR in C, not exponentiation. If squared radii (i*i + j*j) were intended,
// this is a bug; verify before relying on these values.
__global__ void random(Grid m){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=m.dim-1 && j<=i)
{
if(j==0 || i==m.dim-1 || i==j)
m.v[IDT(i,j)]=0.0;
if(j>0 && j<i && i<m.dim-1)
m.v[IDT(i,j)]=i^2+j^2;
}
}
// Three-colour relaxation sweep over interior points with (i+j)%3 == 0,
// solving for the centre value of the 9-point stencil `op` (op[4] is the
// centre coefficient and must be non-zero).
__global__ void suaviza_r(Grid u, Grid f, double * op)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=u.dim-1 && j<=i)
{
if(j>0 && j<i && i<u.dim-1)
if((i+j)%3==0)
{
u.v[IDT(i,j)]=(f.v[IDT(i,j)]-op[0]*u.v[IDT(i-1,j-1)]
-op[1]*u.v[IDT(i-1,j )]
-op[2]*u.v[IDT(i-1,j+1)]
-op[3]*u.v[IDT(i ,j-1)]
-op[5]*u.v[IDT(i ,j+1)]
-op[6]*u.v[IDT(i+1,j-1)]
-op[7]*u.v[IDT(i+1,j )]
-op[8]*u.v[IDT(i+1,j+1)])/op[4];
}
}
}
// Same relaxation sweep for the second colour class, (i+j)%3 == 1.
__global__ void suaviza_g(Grid u, Grid f, double * op)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=u.dim-1 && j<=i)
{
if(j>0 && j<i && i<u.dim-1)
if((i+j)%3==1)
{
u.v[IDT(i,j)]=(f.v[IDT(i,j)]-op[0]*u.v[IDT(i-1,j-1)]
-op[1]*u.v[IDT(i-1,j )]
-op[2]*u.v[IDT(i-1,j+1)]
-op[3]*u.v[IDT(i ,j-1)]
-op[5]*u.v[IDT(i ,j+1)]
-op[6]*u.v[IDT(i+1,j-1)]
-op[7]*u.v[IDT(i+1,j )]
-op[8]*u.v[IDT(i+1,j+1)])/op[4];
}
}
}
// Same relaxation sweep for the third colour class, (i+j)%3 == 2.
__global__ void suaviza_b(Grid u, Grid f, double * op)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=u.dim-1 && j<=i)
{
if(j>0 && j<i && i<u.dim-1)
if((i+j)%3==2)
{
u.v[IDT(i,j)]=(f.v[IDT(i,j)]-op[0]*u.v[IDT(i-1,j-1)]
-op[1]*u.v[IDT(i-1,j )]
-op[2]*u.v[IDT(i-1,j+1)]
-op[3]*u.v[IDT(i ,j-1)]
-op[5]*u.v[IDT(i ,j+1)]
-op[6]*u.v[IDT(i+1,j-1)]
-op[7]*u.v[IDT(i+1,j )]
-op[8]*u.v[IDT(i+1,j+1)])/op[4];
}
}
}
// Residual (defect) d = f - A*u at interior points, using the 9-point
// stencil `op`. NOTE(review): unlike the suaviza_* kernels, the op[0] term
// for neighbour (i-1,j-1) is NOT subtracted here -- verify whether that
// omission is intentional for this operator or a missing term.
__global__ void defecto(Grid u, Grid f, Grid d, double * op)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=u.dim-1 && j<=i)
{
if(j>0 && j<i && i<u.dim-1 && i>0) /* interior points */
{
d.v[IDT(i,j)]=f.v[IDT(i,j)]
-op[1]*u.v[IDT(i-1,j)]
-op[2]*u.v[IDT(i-1,j+1)]
-op[3]*u.v[IDT(i,j-1)]
-op[4]*u.v[IDT(i,j)]
-op[5]*u.v[IDT(i,j+1)]
-op[6]*u.v[IDT(i+1,j-1)]
-op[7]*u.v[IDT(i+1,j)]
-op[8]*u.v[IDT(i+1,j+1)];
}
}
}
// Restriction (fine -> coarse): each interior coarse point (i,j) takes the
// fine point (2i,2j) plus half of its six triangular neighbours, divided by 4.
__global__ void restringe(Grid sup, Grid inf)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(j>0 && j<i && i<inf.dim-1 && i>0) /* interior points of the coarse grid */
{
inf.v[IDT(i,j)]=(sup.v[IDT(2*i,2*j)]+0.5*(
sup.v[IDT(2*i-1,2*j-1)]
+sup.v[IDT(2*i-1,2*j )]
+sup.v[IDT(2*i ,2*j-1)]
+sup.v[IDT(2*i ,2*j+1)]
+sup.v[IDT(2*i+1,2*j )]
+sup.v[IDT(2*i+1,2*j+1)]))/4;
}
}
// Prolongation (coarse -> fine): copies coarse values to even fine points
// and averages neighbouring coarse values onto the in-between fine points.
// NOTE(review): the last line for fine point (2i,2j-1) averages with
// inf(i-1,j-1); by symmetry with the other lines inf(i,j-1) looks intended
// -- verify before changing.
__global__ void interpola(Grid inf, Grid sup)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<=inf.dim-1 && j<=i)
{
if(j>0 && j<i && i<inf.dim-1 && i>0) /* interior points of the coarse grid */
{
sup.v[IDT(2*i ,2*j )] = inf.v[IDT(i,j)];
sup.v[IDT(2*i-1,2*j-1)] = (inf.v[IDT(i,j)]+inf.v[IDT(i-1,j-1)])/2;
sup.v[IDT(2*i-1,2*j )] = (inf.v[IDT(i,j)]+inf.v[IDT(i-1,j )])/2;
sup.v[IDT(2*i ,2*j+1)] = (inf.v[IDT(i,j)]+inf.v[IDT(i ,j+1)])/2;
sup.v[IDT(2*i+1,2*j+1)] = (inf.v[IDT(i,j)]+inf.v[IDT(i+1,j+1)])/2;
sup.v[IDT(2*i+1,2*j )] = (inf.v[IDT(i,j)]+inf.v[IDT(i+1,j )])/2;
sup.v[IDT(2*i ,2*j-1)] = (inf.v[IDT(i,j)]+inf.v[IDT(i-1,j-1)])/2;
}
}
}
// Element-wise accumulation u += v over the whole triangular grid
// (boundary included); used to apply the coarse-grid correction.
__global__ void suma(Grid u, Grid v)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<u.dim && j<=i)
{
u.v[IDT(i,j)]=u.v[IDT(i,j)]+v.v[IDT(i,j)];
}
}
|
934e249421c3dd89ffe06954395f5787d4ee15ea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "deinter_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each configured matrix size and each of 20 block
// shapes, launches deinter_kernel 1000 times and prints the elapsed time.
// Fixes over the original: argv[1] is validated, device buffers are sized in
// bytes (XSIZE*YSIZE*sizeof(float), not raw XSIZE*YSIZE -- a 4x
// under-allocation), and the buffers are freed each iteration instead of
// leaking across all 140 configurations.
int main(int argc, char **argv) {
hipSetDevice(0);
if (argc < 2) {
fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
return 1;
}
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
for (int block_looper = 0; block_looper < 20; block_looper++) {
int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
int NX = 1;
int NY = 1;
int B = 2;
float *X = NULL;
float *Y = NULL;
float *OUT = NULL;
hipMalloc(&X, XSIZE * YSIZE * sizeof(float));
hipMalloc(&Y, XSIZE * YSIZE * sizeof(float));
hipMalloc(&OUT, XSIZE * YSIZE * sizeof(float));
// Round the problem size up to a multiple of the block dimensions.
int iXSIZE = XSIZE;
int iYSIZE = YSIZE;
while (iXSIZE % BLOCKX != 0)
{
iXSIZE++;
}
while (iYSIZE % BLOCKY != 0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Establish the context, then one synchronized launch and ten untimed
// warm-up launches before the timed section.
hipFree(0);
hipLaunchKernelGGL((deinter_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, NX, X, NY, Y, B, OUT);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((deinter_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, NX, X, NY, Y, B, OUT);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((deinter_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, NX, X, NY, Y, B, OUT);
}
auto end = steady_clock::now();
// NOTE(review): kept from the original -- there is no device sync before
// `end`, so this measures launch-enqueue time, not kernel execution time.
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Release the per-configuration buffers (the original leaked them).
hipFree(X);
hipFree(Y);
hipFree(OUT);
}
}
return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "deinter_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each configured matrix size and each of 20 block
// shapes, launches deinter_kernel 1000 times and prints the elapsed time.
// Fixes over the original: argv[1] is validated, device buffers are sized in
// bytes (XSIZE*YSIZE*sizeof(float), not raw XSIZE*YSIZE -- a 4x
// under-allocation), and the buffers are freed each iteration instead of
// leaking across all 140 configurations.
int main(int argc, char **argv) {
cudaSetDevice(0);
if (argc < 2) {
fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
return 1;
}
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
for (int block_looper = 0; block_looper < 20; block_looper++) {
int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
int NX = 1;
int NY = 1;
int B = 2;
float *X = NULL;
float *Y = NULL;
float *OUT = NULL;
cudaMalloc(&X, XSIZE * YSIZE * sizeof(float));
cudaMalloc(&Y, XSIZE * YSIZE * sizeof(float));
cudaMalloc(&OUT, XSIZE * YSIZE * sizeof(float));
// Round the problem size up to a multiple of the block dimensions.
int iXSIZE = XSIZE;
int iYSIZE = YSIZE;
while (iXSIZE % BLOCKX != 0)
{
iXSIZE++;
}
while (iYSIZE % BLOCKY != 0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Establish the context, then one synchronized launch and ten untimed
// warm-up launches before the timed section.
cudaFree(0);
deinter_kernel<<<gridBlock,threadBlock>>>(NX,X,NY,Y,B,OUT);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
deinter_kernel<<<gridBlock,threadBlock>>>(NX,X,NY,Y,B,OUT);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
deinter_kernel<<<gridBlock,threadBlock>>>(NX,X,NY,Y,B,OUT);
}
auto end = steady_clock::now();
// NOTE(review): kept from the original -- there is no device sync before
// `end`, so this measures launch-enqueue time, not kernel execution time.
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Release the per-configuration buffers (the original leaked them).
cudaFree(X);
cudaFree(Y);
cudaFree(OUT);
}
}
return 0;
}
a500d215cf423acc1fe9aabaaa068b5fbd93d181.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/hip/IndexKernel.h>
#include <ATen/native/IndexKernel.h>
#include <type_traits>
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <ATen/core/Array.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/cub.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/KernelUtils.cuh>
#include <ATen/native/quantized/IndexKernel.h>
#include <c10/core/Scalar.h>
namespace at::native {
static constexpr int launch_bound2 = 4;
static constexpr int launch_size_nd = 128;
// Grid layout: each block owns nt*vt consecutive flat indices; every thread
// handles up to vt of them, stepping by nt, with a tail guard against N.
template<int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, launch_bound2)
__global__ void index_elementwise_kernel(int N, func_t f) {
  constexpr int elems_per_block = nt * vt;
  int linear_idx = elems_per_block * blockIdx.x + threadIdx.x;
  #pragma unroll
  for (int step = 0; step < vt; ++step) {
    if (linear_idx < N) {
      f(linear_idx);
      linear_idx += nt;
    }
  }
}
template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( index_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename func_t>
void gpu_index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) {
int num_indices = index_size.size();
AT_ASSERT(num_indices == index_stride.size());
AT_ASSERT(num_indices == iter.ntensors() - 2);
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
gpu_index_kernel(sub_iter, index_size, index_stride, f);
}
return;
}
auto sizes = at::detail::Array<int64_t, MAX_DIMS>(0);
auto strides = at::detail::Array<int64_t, MAX_DIMS>(0);
auto index_ptrs = at::detail::Array<char*, MAX_DIMS>(nullptr);
for (int i = 0; i < num_indices; i++) {
sizes[i] = index_size[i];
strides[i] = index_stride[i];
index_ptrs[i] = (char*)iter.data_ptr(i + 2);
}
char* out_ptr = (char*)iter.data_ptr(0);
char* in_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<3>(iter);
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) {
auto offsets = offset_calc.get(idx);
char* out_data = out_ptr + offsets[0];
char* in_data = in_ptr + offsets[1];
int64_t offset = 0;
#pragma unroll
for (int i = 0; i < num_indices; i++) {
int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]);
CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds");
if (index < 0) {
index += sizes[i];
}
offset += index * strides[i];
}
f(out_data, in_data, offset);
});
}
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
template <typename scalar_t>
void index_fill_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
scalar_t fill_val) {
if (0 == iter.numel()) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds");
if (idx < 0) {
idx += self_dim_size;
}
self_data[idx * self_dim_stride] = fill_val;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_copy_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
auto* __restrict__ source_data = reinterpret_cast<scalar_t*>(source_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds");
self_data[idx * self_dim_stride] = *source_data;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_kernel_impl(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)out_data = *(scalar_t*)(in_data + offset);
});
}
template <typename scalar_t>
void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)(out_data + offset) = *(scalar_t*)in_data;
});
}
static void index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBool, kBFloat16, iter.dtype(), "index_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
static void index_fill_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
const Scalar& source) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, kComplexHalf,
iter.dtype(), "index_fill_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto fill_val = source.to<scalar_t>();
auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val);
index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque);
});
}
static void index_copy_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
// See note [Writing Nondeterministic Operations]
// Nondeterministic when index contains duplicate entries
// this kernel will not be called when torch.use_deterministic_algorithms(True)
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, kComplexHalf,
iter.dtype(), "index_copy_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride);
});
}
static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) {
TORCH_CHECK(!accumulate, "index_put does not support accumulate=true");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBool, kBFloat16, iter.dtype(), "index_put", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_put_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
void index_put_kernel_quantized_cuda(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate, double scale, int zero_point) {
TORCH_CHECK(!accumulate, "index_put does not support accumulate=true");
AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "index_put", [&] {
constexpr int64_t qmin = std::numeric_limits<typename scalar_t::underlying>::min();
constexpr int64_t qmax = std::numeric_limits<typename scalar_t::underlying>::max();
float inv_scale = 1.0f / static_cast<float>(scale);
gpu_index_kernel(iter, index_size, index_stride, [inv_scale, zero_point, qmin, qmax]C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
int64_t qvalue = static_cast<int64_t>(zero_point + nearbyintf(*(float*)in_data * inv_scale));
qvalue = min(max(qvalue, qmin), qmax);
*(scalar_t*)(out_data + offset) = static_cast<scalar_t>(qvalue);
});
});
}
template <typename scalar_t, typename index_t, typename func_t>
void cuda_take_put_kernel(
TensorIterator& iter,
const TensorBase& indexed,
const func_t& f) {
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f);
}
return;
}
const auto numel = indexed.numel();
const bool is_contiguous = indexed.is_contiguous();
char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
const auto offset_calc = make_offset_calculator<2>(iter);
using uindex_t = std::make_unsigned_t<index_t>;
// OffsetCalculator needs the sizes and strides reveresed
const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend());
const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend());
const auto* indexed_strides_data = indexed_strides.data();
const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(),
indexed_sizes.data(),
&indexed_strides_data);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]);
const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds");
index_t offset = static_cast<index_t>(idx);
if (offset < 0) {
offset += numel;
}
if (!is_contiguous) {
offset = offset_indexed.get(offset)[0];
}
f(iterated, offset);
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
void put_kernel(TensorIterator& iter, const TensorBase& output, const bool accumulate) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] {
// Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd`
AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long,
"put_cuda_index", [&] {
auto* __restrict__ indexed_ptr = output.template data_ptr<scalar_t>();
if (accumulate) {
index_t numel = output.numel();
cuda_take_put_kernel<scalar_t, index_t>(iter, output,
[numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated);
});
}
else {
cuda_take_put_kernel<scalar_t, index_t>(iter, output,
[indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
indexed_ptr[offset] = iterated;
});
}
});
});
}
void take_kernel(
TensorIterator& iter,
const TensorBase& input) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] {
// Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented
AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? ScalarType::Int : ScalarType::Long,
"take_cuda_index", [&] {
const auto* __restrict__ indexed_ptr = input.template data_ptr<scalar_t>();
cuda_take_put_kernel<scalar_t, index_t>(iter, input,
[indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
iterated = indexed_ptr[offset];
});
});
});
}
namespace {
// Single-thread device-side check that the count of set mask elements does
// not exceed the source element count. Callers pass pointers to the LAST
// entries of the exclusive prefix sum and of the mask, so their sum is the
// inclusive total of set elements.
template <typename mask_t>
__global__ void masked_scatter_size_check(int64_t *mask_exclusive_sum, mask_t *mask, int64_t srcSize) {
  const int64_t total_set = *mask_exclusive_sum + static_cast<int64_t>(*mask);
  CUDA_KERNEL_ASSERT(total_set <= srcSize);
}
// Scatters elements of `source` into `self` wherever `mask` is set.
// `maskPrefixSum` receives the exclusive prefix sum of the mask, mapping each
// set mask position to the source element it consumes.
template <typename mask_t>
void masked_scatter_cuda_impl(
    const TensorBase &self, const TensorBase &mask,
    const TensorBase &maskPrefixSum, const TensorBase &source) {
  auto srcSize = source.numel();
  auto mask_cont = mask.contiguous();
  auto mask_numel = mask.numel();
  // Empty mask: nothing to scatter. Returning early also protects the
  // size-check launch below, which reads element [mask_numel - 1] and would
  // otherwise index one element before the start of both buffers.
  if (mask_numel == 0) {
    return;
  }
  // Use a prefix sum to determine the output locations of the masked elements
  auto maskPrefixSum_data = maskPrefixSum.data_ptr<int64_t>();
  auto mask_data = mask_cont.data_ptr<mask_t>();
  at::cuda::cub::mask_exclusive_sum(
      mask_data, maskPrefixSum_data, mask_numel);
  // Asynchronously check that the number of `1` elements present in the mask
  // must be <= the number of elements available in `src`.
  hipLaunchKernelGGL(( masked_scatter_size_check), dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
      &maskPrefixSum_data[mask_numel - 1], &mask_data[mask_numel - 1], srcSize);
  C10_HIP_KERNEL_LAUNCH_CHECK();
  // We are getting elements from `src` based on an offset from
  // `maskPrefixSum`, so that should be made contiguous too
  auto source_contig = source.contiguous();
  auto iter = TensorIteratorConfig()
      .set_check_mem_overlap(false)
      .check_all_same_dtype(false)
      .resize_outputs(false)
      .add_output(self)
      .add_input(self)
      .add_input(mask_cont)
      .add_input(maskPrefixSum)
      .build();
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
      ScalarType::Bool,
      ScalarType::BFloat16,
      ScalarType::Half,
      self.scalar_type(),
      "masked_scatter_",
      [&]() {
        auto source_ptr = source_contig.data_ptr<scalar_t>();
        gpu_kernel(
            iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t {
              if (mask) {
                return source_ptr[maskPrefixSum];
              }
              return a;
            });
        // NOTE(review): return value discarded — this only clears a pending
        // error; consider C10_HIP_KERNEL_LAUNCH_CHECK() instead. TODO confirm.
        hipGetLastError();
      });
}
} // anonymous namespace
// Dispatches masked_scatter on the mask dtype: kBool masks load as bool,
// anything else is treated as a byte mask.
void launch_masked_scatter_kernel(
    const TensorBase &self, const TensorBase &mask,
    const TensorBase &maskPrefixSum, const TensorBase &source) {
  const bool mask_is_bool = (mask.scalar_type() == kBool);
  if (mask_is_bool) {
    masked_scatter_cuda_impl<bool>(self, mask, maskPrefixSum, source);
    return;
  }
  masked_scatter_cuda_impl<uint8_t>(self, mask, maskPrefixSum, source);
}
template <typename scalar_t>
void flip_kernel_impl(TensorIterator& iter) {
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
flip_kernel_impl<scalar_t>(sub_iter);
}
return;
}
char* const __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
const char* const __restrict__ in_ptr = reinterpret_cast<const char*>(iter.data_ptr(1));
const auto offset_calc = make_offset_calculator<2, /*signed_strides=*/true>(iter);
auto loop = [=]C10_DEVICE(const int i) {
const auto offsets = offset_calc.get(i);
// offsets can be negative here, but it's fine
scalar_t* const __restrict__ out_data = reinterpret_cast<scalar_t*>(out_ptr + offsets[0]);
const scalar_t* const __restrict__ in_data = reinterpret_cast<const scalar_t*>(in_ptr + offsets[1]);
*out_data = *in_data;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
// Dispatch stub for flip(): type-erases each dtype to an OpaqueType of equal
// size, since flipping is a pure memory move independent of element meaning.
void flip_kernel(TensorIterator& iter, const bool quantized) {
if (quantized) {
AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "flip_quantized_cuda",
[&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
flip_kernel_impl<dtype>(iter);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "flip_cuda",
[&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
flip_kernel_impl<dtype>(iter);
});
}
}
REGISTER_DISPATCH(index_stub, &index_kernel);
REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel);
REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel);
REGISTER_DISPATCH(index_put_stub, &index_put_kernel);
REGISTER_DISPATCH(put_stub, &put_kernel);
REGISTER_DISPATCH(take_stub, &take_kernel);
REGISTER_DISPATCH(flip_stub, &flip_kernel);
REGISTER_CUDA_DISPATCH(index_put_kernel_quantized_stub, &index_put_kernel_quantized_cuda);
} // namespace at::native
| a500d215cf423acc1fe9aabaaa068b5fbd93d181.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/cuda/IndexKernel.h>
#include <ATen/native/IndexKernel.h>
#include <type_traits>
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/cub.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
#include <ATen/native/quantized/IndexKernel.h>
#include <c10/core/Scalar.h>
namespace at::native {
static constexpr int launch_bound2 = 4;
static constexpr int launch_size_nd = 128;
// Grid layout: each block owns nt*vt consecutive flat indices; every thread
// handles up to vt of them, stepping by nt, with a tail guard against N.
template<int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, launch_bound2)
__global__ void index_elementwise_kernel(int N, func_t f) {
  constexpr int elems_per_block = nt * vt;
  int linear_idx = elems_per_block * blockIdx.x + threadIdx.x;
  #pragma unroll
  for (int step = 0; step < vt; ++step) {
    if (linear_idx < N) {
      f(linear_idx);
      linear_idx += nt;
    }
  }
}
// Host-side launcher: runs index_elementwise_kernel over N flat indices on
// the current CUDA stream. nt = threads per block, vt = indices per thread,
// so each block covers nt*vt indices.
template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
// Callers must have pre-split work (with_32bit_indexing) so N fits in int32.
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
// ceil(N / (nt * vt)) blocks
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::cuda::getCurrentCUDAStream();
index_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Generic advanced-indexing driver. For every output element it derives a
// byte offset from the int64 index tensors, then invokes
// f(out_ptr, in_ptr, offset) to perform the actual read/write.
// Operand layout in `iter`: 0 = output, 1 = input, 2.. = index tensors.
template <typename func_t>
void gpu_index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) {
int num_indices = index_size.size();
AT_ASSERT(num_indices == index_stride.size());
AT_ASSERT(num_indices == iter.ntensors() - 2);
if (iter.numel() == 0) {
return;
}
// Recurse on 32-bit-indexable sub-iterators so the kernel can use int math.
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
gpu_index_kernel(sub_iter, index_size, index_stride, f);
}
return;
}
// Copy per-index metadata into fixed-size arrays captured by value below.
auto sizes = at::detail::Array<int64_t, MAX_DIMS>(0);
auto strides = at::detail::Array<int64_t, MAX_DIMS>(0);
auto index_ptrs = at::detail::Array<char*, MAX_DIMS>(nullptr);
for (int i = 0; i < num_indices; i++) {
sizes[i] = index_size[i];
strides[i] = index_stride[i];
index_ptrs[i] = (char*)iter.data_ptr(i + 2);
}
char* out_ptr = (char*)iter.data_ptr(0);
char* in_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<3>(iter);
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) {
auto offsets = offset_calc.get(idx);
char* out_data = out_ptr + offsets[0];
char* in_data = in_ptr + offsets[1];
int64_t offset = 0;
#pragma unroll
for (int i = 0; i < num_indices; i++) {
int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]);
CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds");
// Negative indices wrap around, matching Python indexing semantics.
if (index < 0) {
index += sizes[i];
}
offset += index * strides[i];
}
f(out_data, in_data, offset);
});
}
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
// index_fill: writes fill_val along dimension `dim` at each index produced
// by `iter` (operands: 0 = self slice, 1 = int64 index). The dim geometry is
// carried entirely by self_dim_size / self_dim_stride; `dim` itself is
// unused in the body.
template <typename scalar_t>
void index_fill_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
scalar_t fill_val) {
if (0 == iter.numel()) {
return;
}
// Split until 32-bit indexing holds (required by launch_kernel).
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds");
// Negative indices wrap, matching Python semantics.
if (idx < 0) {
idx += self_dim_size;
}
self_data[idx * self_dim_stride] = fill_val;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
// index_copy: copies source elements into self along dimension `dim` at the
// positions given by the index operand (operands: 0 = self, 1 = int64 index,
// 2 = source). Unlike index_fill, negative indices are NOT allowed (the
// device assert requires idx >= 0). `dim` geometry is carried by
// self_dim_size / self_dim_stride.
template <typename scalar_t>
void index_copy_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
if (iter.numel() == 0) {
return;
}
// Split until 32-bit indexing holds (required by launch_kernel).
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
auto* __restrict__ source_data = reinterpret_cast<scalar_t*>(source_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds");
self_data[idx * self_dim_stride] = *source_data;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
// Gather for index(): reads the input at the byte offset computed by
// gpu_index_kernel and writes it to the output element. scalar_t is
// instantiated as an OpaqueType of matching size, so this is a fixed-size
// raw copy.
template <typename scalar_t>
void index_kernel_impl(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride) {
  auto gather_one = []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
    *reinterpret_cast<scalar_t*>(out_data) = *reinterpret_cast<scalar_t*>(in_data + offset);
  };
  gpu_index_kernel(iter, index_size, index_stride, gather_one);
}
// Scatter for index_put(): writes the input element into the output at the
// byte offset computed by gpu_index_kernel (no accumulation).
template <typename scalar_t>
void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
  auto scatter_one = []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
    *reinterpret_cast<scalar_t*>(out_data + offset) = *reinterpret_cast<scalar_t*>(in_data);
  };
  gpu_index_kernel(iter, index_size, index_stride, scatter_one);
}
// Dispatch stub for index(): dispatches on dtype but erases the real type to
// an OpaqueType of equal size, so same-sized dtypes share one kernel.
static void index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBool, kBFloat16, iter.dtype(), "index_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
// Dispatch stub for index_fill: converts the Scalar fill value to the
// concrete dtype, then reinterprets it as a same-sized OpaqueType so that
// same-sized dtypes share one kernel instantiation.
static void index_fill_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
const Scalar& source) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, kComplexHalf,
iter.dtype(), "index_fill_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto fill_val = source.to<scalar_t>();
auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val);
index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque);
});
}
// Dispatch stub for index_copy, type-erased to OpaqueType by size.
static void index_copy_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
// See note [Writing Nondeterministic Operations]
// Nondeterministic when index contains duplicate entries
// this kernel will not be called when torch.use_deterministic_algorithms(True)
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, kComplexHalf,
iter.dtype(), "index_copy_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride);
});
}
// Dispatch stub for index_put (non-accumulating path only; accumulation is
// handled elsewhere), type-erased to OpaqueType by size.
static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) {
TORCH_CHECK(!accumulate, "index_put does not support accumulate=true")
// index_put for quantized tensors: quantizes the incoming float values on
// the fly (round with nearbyintf, then clamp to the underlying integer
// range) before scattering. Accumulation is not supported.
void index_put_kernel_quantized_cuda(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate, double scale, int zero_point) {
TORCH_CHECK(!accumulate, "index_put does not support accumulate=true");
AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "index_put", [&] {
constexpr int64_t qmin = std::numeric_limits<typename scalar_t::underlying>::min();
constexpr int64_t qmax = std::numeric_limits<typename scalar_t::underlying>::max();
// Multiply by the reciprocal on device instead of dividing.
float inv_scale = 1.0f / static_cast<float>(scale);
gpu_index_kernel(iter, index_size, index_stride, [inv_scale, zero_point, qmin, qmax]C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
int64_t qvalue = static_cast<int64_t>(zero_point + nearbyintf(*(float*)in_data * inv_scale));
qvalue = min(max(qvalue, qmin), qmax);
*(scalar_t*)(out_data + offset) = static_cast<scalar_t>(qvalue);
});
});
}
// Shared driver for take()/put(): for each (iterated element, int64 index)
// pair from `iter`, resolves the index into a linear offset into `indexed`
// and calls f(iterated, offset). take() reads from `indexed`, put() writes
// to it — the direction lives entirely in `f`.
template <typename scalar_t, typename index_t, typename func_t>
void cuda_take_put_kernel(
TensorIterator& iter,
const TensorBase& indexed,
const func_t& f) {
// Split until 32-bit indexing holds (required by launch_kernel).
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f);
}
return;
}
const auto numel = indexed.numel();
const bool is_contiguous = indexed.is_contiguous();
char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
const auto offset_calc = make_offset_calculator<2>(iter);
using uindex_t = std::make_unsigned_t<index_t>;
// OffsetCalculator needs the sizes and strides reversed
const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend());
const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend());
const auto* indexed_strides_data = indexed_strides.data();
const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(),
indexed_sizes.data(),
&indexed_strides_data);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]);
const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds");
index_t offset = static_cast<index_t>(idx);
// Negative indices wrap, matching Python semantics.
if (offset < 0) {
offset += numel;
}
// Non-contiguous `indexed`: translate the logical linear index into the
// actual strided element offset.
if (!is_contiguous) {
offset = offset_indexed.get(offset)[0];
}
f(iterated, offset);
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
// put(): scatters iterated values into `output` at the given flat indices.
// When accumulate is true, duplicate indices are combined with an atomic add
// (fastSpecializedAtomicAdd); otherwise duplicates race and the result for
// them is whichever write lands last.
void put_kernel(TensorIterator& iter, const TensorBase& output, const bool accumulate) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] {
// Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd`
AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long,
"put_cuda_index", [&] {
auto* __restrict__ indexed_ptr = output.template data_ptr<scalar_t>();
if (accumulate) {
index_t numel = output.numel();
cuda_take_put_kernel<scalar_t, index_t>(iter, output,
[numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated);
});
}
else {
cuda_take_put_kernel<scalar_t, index_t>(iter, output,
[indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
indexed_ptr[offset] = iterated;
});
}
});
});
}
// take(): gathers elements of `input` at the given flat indices into the
// iterated output. Index dtype is chosen (Int vs Long) by whether the input
// fits 32-bit index math.
void take_kernel(
TensorIterator& iter,
const TensorBase& input) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] {
// Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented
AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? ScalarType::Int : ScalarType::Long,
"take_cuda_index", [&] {
const auto* __restrict__ indexed_ptr = input.template data_ptr<scalar_t>();
cuda_take_put_kernel<scalar_t, index_t>(iter, input,
[indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
iterated = indexed_ptr[offset];
});
});
});
}
namespace {
// Single-thread device-side check that the count of set mask elements does
// not exceed the source element count. Callers pass pointers to the LAST
// entries of the exclusive prefix sum and of the mask, so their sum is the
// inclusive total of set elements.
template <typename mask_t>
__global__ void masked_scatter_size_check(int64_t *mask_exclusive_sum, mask_t *mask, int64_t srcSize) {
  const int64_t total_set = *mask_exclusive_sum + static_cast<int64_t>(*mask);
  CUDA_KERNEL_ASSERT(total_set <= srcSize);
}
// Scatters elements of `source` into `self` wherever `mask` is set.
// `maskPrefixSum` receives the exclusive prefix sum of the mask, mapping each
// set mask position to the source element it consumes.
template <typename mask_t>
void masked_scatter_cuda_impl(
    const TensorBase &self, const TensorBase &mask,
    const TensorBase &maskPrefixSum, const TensorBase &source) {
  auto srcSize = source.numel();
  auto mask_cont = mask.contiguous();
  auto mask_numel = mask.numel();
  // Empty mask: nothing to scatter. Returning early also protects the
  // size-check launch below, which reads element [mask_numel - 1] and would
  // otherwise index one element before the start of both buffers.
  if (mask_numel == 0) {
    return;
  }
  // Use a prefix sum to determine the output locations of the masked elements
  auto maskPrefixSum_data = maskPrefixSum.data_ptr<int64_t>();
  auto mask_data = mask_cont.data_ptr<mask_t>();
  at::cuda::cub::mask_exclusive_sum(
      mask_data, maskPrefixSum_data, mask_numel);
  // Asynchronously check that the number of `1` elements present in the mask
  // must be <= the number of elements available in `src`.
  masked_scatter_size_check<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
      &maskPrefixSum_data[mask_numel - 1], &mask_data[mask_numel - 1], srcSize);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
  // We are getting elements from `src` based on an offset from
  // `maskPrefixSum`, so that should be made contiguous too
  auto source_contig = source.contiguous();
  auto iter = TensorIteratorConfig()
      .set_check_mem_overlap(false)
      .check_all_same_dtype(false)
      .resize_outputs(false)
      .add_output(self)
      .add_input(self)
      .add_input(mask_cont)
      .add_input(maskPrefixSum)
      .build();
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
      ScalarType::Bool,
      ScalarType::BFloat16,
      ScalarType::Half,
      self.scalar_type(),
      "masked_scatter_",
      [&]() {
        auto source_ptr = source_contig.data_ptr<scalar_t>();
        gpu_kernel(
            iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t {
              if (mask) {
                return source_ptr[maskPrefixSum];
              }
              return a;
            });
        // NOTE(review): return value discarded — this only clears a pending
        // error; consider C10_CUDA_KERNEL_LAUNCH_CHECK() instead. TODO confirm.
        cudaGetLastError();
      });
}
} // anonymous namespace
// Dispatches masked_scatter on the mask dtype: kBool masks load as bool,
// anything else is treated as a byte mask.
void launch_masked_scatter_kernel(
    const TensorBase &self, const TensorBase &mask,
    const TensorBase &maskPrefixSum, const TensorBase &source) {
  const bool mask_is_bool = (mask.scalar_type() == kBool);
  if (mask_is_bool) {
    masked_scatter_cuda_impl<bool>(self, mask, maskPrefixSum, source);
    return;
  }
  masked_scatter_cuda_impl<uint8_t>(self, mask, maskPrefixSum, source);
}
// flip(): element-for-element copy where the output offset calculator is
// built with signed strides, so flipped dimensions walk backwards through
// memory (offsets may be negative relative to the base pointer).
template <typename scalar_t>
void flip_kernel_impl(TensorIterator& iter) {
// Split until 32-bit indexing holds (required by launch_kernel).
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
flip_kernel_impl<scalar_t>(sub_iter);
}
return;
}
char* const __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
const char* const __restrict__ in_ptr = reinterpret_cast<const char*>(iter.data_ptr(1));
const auto offset_calc = make_offset_calculator<2, /*signed_strides=*/true>(iter);
auto loop = [=]C10_DEVICE(const int i) {
const auto offsets = offset_calc.get(i);
// offsets can be negative here, but it's fine
scalar_t* const __restrict__ out_data = reinterpret_cast<scalar_t*>(out_ptr + offsets[0]);
const scalar_t* const __restrict__ in_data = reinterpret_cast<const scalar_t*>(in_ptr + offsets[1]);
*out_data = *in_data;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
// Dispatch stub for flip(): type-erases each dtype to an OpaqueType of equal
// size, since flipping is a pure memory move independent of element meaning.
void flip_kernel(TensorIterator& iter, const bool quantized) {
if (quantized) {
AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "flip_quantized_cuda",
[&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
flip_kernel_impl<dtype>(iter);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "flip_cuda",
[&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
flip_kernel_impl<dtype>(iter);
});
}
}
REGISTER_DISPATCH(index_stub, &index_kernel);
REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel);
REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel);
REGISTER_DISPATCH(index_put_stub, &index_put_kernel);
REGISTER_DISPATCH(put_stub, &put_kernel);
REGISTER_DISPATCH(take_stub, &take_kernel);
REGISTER_DISPATCH(flip_stub, &flip_kernel);
REGISTER_CUDA_DISPATCH(index_put_kernel_quantized_stub, &index_put_kernel_quantized_cuda);
} // namespace at::native
|
f120c624f070cf0ed9f38ac942267b5c939b292d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THH/THHAtomics.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Maps output cell `index` to the left edge of its pooling window along one
// dimension, using the pseudo-random offset `sample` in [0, 1).  The last
// output cell is pinned so its window ends exactly at the input boundary.
template <typename scalar_t, typename accscalar_t>
__device__ inline int64_t get_intervals(
  accscalar_t sample,
  int64_t index,
  int64_t inputSize,
  int64_t outputSize,
  int64_t poolSize) {
  if (index == outputSize - 1) {
    return inputSize - poolSize;
  }
  const accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
      static_cast<accscalar_t>(outputSize - 1);
  // Difference of two truncations yields a window start whose gaps follow the
  // fractional stride alpha.
  return static_cast<int64_t>((index + sample) * alpha) -
      static_cast<int64_t>(sample * alpha);
}
// Forward kernel for 3D fractional max pooling.
// Launch layout: grid.x tiles the flattened (t, h, w) output points of one
// plane, grid.y = plane, grid.z = batch; each thread computes one output cell.
template <typename scalar_t>
__global__ void fractional_max_pool3d_out_frame(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
PackedTensorAccessor64<int64_t, 5> indices,
PackedTensorAccessor64<scalar_t, 3> samples,
int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
// Output (t, h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3) *
output.size(4)){
int64_t outputT = ourOutputPoint / (output.size(3) *
output.size(4));
int64_t outputH = (ourOutputPoint / output.size(4)) %
output.size(3);
int64_t outputW = ourOutputPoint % output.size(4);
// Window start along each dimension, derived from the per-(batch, plane)
// random samples in [0, 1).
int64_t poolT = get_intervals<scalar_t,accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputT, input.size(2), output.size(2), poolSizeT);
int64_t poolH = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(3), output.size(3), poolSizeH);
int64_t poolW = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][2]),
outputW, input.size(4), output.size(4), poolSizeW);
scalar_t maxVal = at::numeric_limits<scalar_t>::lowest();
int64_t maxIndex = -1;
for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) {
for (int64_t h = poolH; h < poolH + poolSizeH; ++h) {
// NOTE(review): both branches scan the same window; the split on
// poolSizeW in [2, 7] looks like a compiler-unrolling aid — confirm.
if(poolSizeW < 2 || poolSizeW > 7) {
for (int64_t w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal) {
maxIndex = t * input.size(3) *
input.size(4) + h * input.size(4) + w;
maxVal = val;
}
}
} else {
for (int64_t i = 0; i < poolSizeW; ++i) {
int64_t w = i + poolW;
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal) {
maxIndex = t * input.size(3) * input.size(4) +
h * input.size(4) + w;
maxVal = val;
}
}
}
}
}
// maxIndex is the flat (t, h, w) offset within the plane; the backward
// kernel decodes it with the same size(3)/size(4) arithmetic.
assert(maxVal != at::numeric_limits<scalar_t>::lowest());
assert(maxIndex != -1);
indices[batch][plane][outputT][outputH][outputW] = maxIndex;
output[batch][plane][outputT][outputH][outputW] = maxVal;
}
}
// Backward kernel: routes each gradOutput cell to the input position recorded
// in `indices` by the forward pass.  gpuAtomicAdd is required because pooling
// windows can overlap, so several output cells may map to one input cell.
template <typename scalar_t>
__global__ void fractional_max_pool3d_backward_out_frame(
PackedTensorAccessor64<scalar_t, 5> gradInput,
PackedTensorAccessor64<scalar_t, 5> gradOutput,
PackedTensorAccessor64<int64_t, 5> indices) {
// Output (h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3) * gradOutput.size(4)) {
int64_t outputW = ourOutputPoint % gradOutput.size(4);
int64_t outputH = (ourOutputPoint / gradOutput.size(4)) %
gradOutput.size(3);
int64_t outputT = ourOutputPoint / (gradOutput.size(3) *
gradOutput.size(4));
// Decode the flat per-plane index written by the forward kernel.
int64_t index = indices[batch][plane][outputT][outputH][outputW];
assert(index >= 0);
int64_t inputW = index % gradInput.size(4);
int64_t inputH = (index / gradInput.size(4)) %
gradInput.size(3);
int64_t inputT = index / (gradInput.size(3) *
gradInput.size(4));
assert(inputT < gradInput.size(2));
gpuAtomicAdd(
&gradInput[batch][plane][inputT][inputH][inputW],
gradOutput[batch][plane][outputT][outputH][outputW]
);
}
}
// Host driver for the forward pass: validates shapes, resizes output/indices,
// normalizes 4D (unbatched) input to a 5D view, and launches the kernel with
// one grid slice per (batch, plane).
void fractional_max_pool3d_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples) {
int64_t planeDim = 0;
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t numBatch = 1;
int64_t outputT = output_size[0];
int64_t outputH = output_size[1];
int64_t outputW = output_size[2];
int64_t poolSizeT = pool_size[0];
int64_t poolSizeH = pool_size[1];
int64_t poolSizeW = pool_size[2];
int64_t ndims = input.ndimension();
TORCH_CHECK(
input.numel() != 0 && (ndims == 4 || ndims == 5),
"fractional_max_pool3d_out_cuda_template(): ",
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
ndims);
// 5D input carries a leading batch dimension; shift all dim indices by one.
if (ndims == 5) {
numBatch = input.size(0);
planeDim++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t numPlanes = input.size(planeDim);
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
// Every pooling window must fit inside the input along each dimension.
TORCH_CHECK(
outputT + poolSizeT - 1 < inputT,
"fractional_max_pool3d_out_cuda_template(): ",
"pool time (", poolSizeT, ") too large relative to input time (",
inputT, ")");
TORCH_CHECK(
outputH + poolSizeH - 1 < inputH,
"fractional_max_pool3d_out_cuda_template(): ",
"pool height (", poolSizeH, ") too large relative to input height (",
inputH, ")");
TORCH_CHECK(
outputW + poolSizeW - 1 < inputW,
"fractional_max_pool3d_out_cuda_template(): ",
"pool width (", poolSizeW, ") too large relative to input width (",
inputW, ")");
if (ndims == 4) {
/* resize output */
output.resize_({numPlanes, outputT, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numPlanes, outputT, outputH, outputW});
} else {
/* resize output */
output.resize_({numBatch, numPlanes, outputT, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numBatch, numPlanes, outputT, outputH, outputW});
}
// The kernel always indexes 5D accessors, so add a fake batch dim of 1
// for the unbatched case.
auto output_ = output;
auto indices_ = indices;
auto input_ = input;
if(ndims == 4) {
output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW});
input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW});
}
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = output_.size(2) *
output_.size(3) * output_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"fractional_max_pool3d_out_frame",
[&]{
hipLaunchKernelGGL(( fractional_max_pool3d_out_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_.packed_accessor64<scalar_t, 5>(),
output_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>(),
randomSamples.packed_accessor64<scalar_t, 3>(),
poolSizeT, poolSizeH, poolSizeW
);
}
);
// Surface launch-configuration errors from the asynchronous launch.
AT_CUDA_CHECK(hipGetLastError());
}
// Host driver for the backward pass: checks gradOutput against the expected
// output_size, zero-fills gradInput, normalizes to 5D, and scatters gradients
// through the saved indices.
void fractional_max_pool3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef pool_size /* unused */,
IntArrayRef output_size,
const Tensor& indices) {
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t outputT = output_size[0];
int64_t outputH = output_size[1];
int64_t outputW = output_size[2];
int64_t ndims = input.ndimension();
if (ndims == 5) {
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
TORCH_CHECK(
outputT == gradOutput.size(dimt),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput time unexpected"
);
TORCH_CHECK(
outputH == gradOutput.size(dimh),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput height unexpected"
);
TORCH_CHECK(
outputW == gradOutput.size(dimw),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput width unexpected"
);
/* resize */
// gradInput starts at zero; the kernel accumulates into it atomically.
gradInput.resize_as_(input);
gradInput.zero_();
// Add a fake batch dimension for the unbatched (4D) case so the kernel can
// always use 5D accessors.
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
if(ndims == 4) {
gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT,
inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT,
outputH, outputW});
indices_ = indices_.reshape({1, indices.size(0), outputT, outputH,
outputW});
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3) * gradOutput_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
gradOutput.scalar_type(),
"fractional_max_pool3d_backward_out_frame",
[&] {
hipLaunchKernelGGL(( fractional_max_pool3d_backward_out_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_.packed_accessor64<scalar_t, 5>(),
gradOutput_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>()
);
}
);
// Surface launch-configuration errors from the asynchronous launch.
AT_CUDA_CHECK(hipGetLastError());
}
}// namespace
// Out-variant entry point: fills the caller-provided `output` and `indices`
// tensors in place and returns references to them.
std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda(
  at::Tensor& output,
  at::Tensor& indices,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples) {
  fractional_max_pool3d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::tuple<Tensor&, Tensor&>(output, indices);
}
// Functional entry point: allocates fresh output/indices tensors (the template
// resizes them to the right shape) and returns them by value.
std::tuple<Tensor, Tensor> fractional_max_pool3d_cuda(
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples) {
  // Indices are int64 positions into each plane, hence the kLong dtype.
  Tensor output = at::empty({0}, input.options());
  Tensor indices = at::empty({0}, input.options().dtype(kLong));
  fractional_max_pool3d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::tuple<Tensor, Tensor>(output, indices);
}
// Out-variant backward entry point: accumulates into the caller-provided
// gradInput (which the template resizes and zero-fills) and returns it.
Tensor& fractional_max_pool3d_backward_out_cuda(
  at::Tensor& gradInput,
  const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices) {
  fractional_max_pool3d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
// Functional backward entry point: allocates gradInput and delegates to the
// shared template, which resizes it to match `input`.
Tensor fractional_max_pool3d_backward_cuda(
  const at::Tensor& gradOutput,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices) {
  Tensor gradInput = at::empty({0}, input.options());
  fractional_max_pool3d_backward_out_cuda_template(
      gradInput, gradOutput, input, pool_size, output_size, indices);
  return gradInput;
}
}// native
}// at
| f120c624f070cf0ed9f38ac942267b5c939b292d.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THC/THCAtomics.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Computes where the pooling window for output cell `index` begins along one
// dimension.  `sample` in [0, 1) randomizes the placement; the final cell is
// clamped so the window never runs past the input.
template <typename scalar_t, typename accscalar_t>
__device__ inline int64_t get_intervals(
  accscalar_t sample,
  int64_t index,
  int64_t inputSize,
  int64_t outputSize,
  int64_t poolSize) {
  if (index == outputSize - 1) {
    return inputSize - poolSize;
  }
  const accscalar_t stride = static_cast<accscalar_t>(inputSize - poolSize) /
      static_cast<accscalar_t>(outputSize - 1);
  return static_cast<int64_t>((index + sample) * stride) -
      static_cast<int64_t>(sample * stride);
}
// Forward kernel for 3D fractional max pooling (CUDA original of the hipified
// copy above).  grid.x tiles the flattened (t, h, w) output points, grid.y is
// the plane, grid.z the batch; each thread fills one output cell and records
// the argmax position into `indices` for the backward pass.
template <typename scalar_t>
__global__ void fractional_max_pool3d_out_frame(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
PackedTensorAccessor64<int64_t, 5> indices,
PackedTensorAccessor64<scalar_t, 3> samples,
int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
// Output (t, h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3) *
output.size(4)){
int64_t outputT = ourOutputPoint / (output.size(3) *
output.size(4));
int64_t outputH = (ourOutputPoint / output.size(4)) %
output.size(3);
int64_t outputW = ourOutputPoint % output.size(4);
int64_t poolT = get_intervals<scalar_t,accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputT, input.size(2), output.size(2), poolSizeT);
int64_t poolH = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(3), output.size(3), poolSizeH);
int64_t poolW = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][2]),
outputW, input.size(4), output.size(4), poolSizeW);
scalar_t maxVal = at::numeric_limits<scalar_t>::lowest();
int64_t maxIndex = -1;
for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) {
for (int64_t h = poolH; h < poolH + poolSizeH; ++h) {
if(poolSizeW < 2 || poolSizeW > 7) {
for (int64_t w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal) {
maxIndex = t * input.size(3) *
input.size(4) + h * input.size(4) + w;
maxVal = val;
}
}
} else {
for (int64_t i = 0; i < poolSizeW; ++i) {
int64_t w = i + poolW;
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal) {
maxIndex = t * input.size(3) * input.size(4) +
h * input.size(4) + w;
maxVal = val;
}
}
}
}
}
assert(maxVal != at::numeric_limits<scalar_t>::lowest());
assert(maxIndex != -1);
indices[batch][plane][outputT][outputH][outputW] = maxIndex;
output[batch][plane][outputT][outputH][outputW] = maxVal;
}
}
// Backward kernel (CUDA original): each thread reads one gradOutput cell and
// atomically adds it to the input position saved by the forward kernel.
// Atomics are needed because overlapping windows can target one input cell.
template <typename scalar_t>
__global__ void fractional_max_pool3d_backward_out_frame(
PackedTensorAccessor64<scalar_t, 5> gradInput,
PackedTensorAccessor64<scalar_t, 5> gradOutput,
PackedTensorAccessor64<int64_t, 5> indices) {
// Output (h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3) * gradOutput.size(4)) {
int64_t outputW = ourOutputPoint % gradOutput.size(4);
int64_t outputH = (ourOutputPoint / gradOutput.size(4)) %
gradOutput.size(3);
int64_t outputT = ourOutputPoint / (gradOutput.size(3) *
gradOutput.size(4));
int64_t index = indices[batch][plane][outputT][outputH][outputW];
assert(index >= 0);
int64_t inputW = index % gradInput.size(4);
int64_t inputH = (index / gradInput.size(4)) %
gradInput.size(3);
int64_t inputT = index / (gradInput.size(3) *
gradInput.size(4));
assert(inputT < gradInput.size(2));
gpuAtomicAdd(
&gradInput[batch][plane][inputT][inputH][inputW],
gradOutput[batch][plane][outputT][outputH][outputW]
);
}
}
// Host driver for the forward pass (CUDA original): validates shapes, resizes
// output/indices, normalizes 4D input to a 5D view, and launches the kernel
// with one grid slice per (batch, plane).
void fractional_max_pool3d_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples) {
int64_t planeDim = 0;
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t numBatch = 1;
int64_t outputT = output_size[0];
int64_t outputH = output_size[1];
int64_t outputW = output_size[2];
int64_t poolSizeT = pool_size[0];
int64_t poolSizeH = pool_size[1];
int64_t poolSizeW = pool_size[2];
int64_t ndims = input.ndimension();
TORCH_CHECK(
input.numel() != 0 && (ndims == 4 || ndims == 5),
"fractional_max_pool3d_out_cuda_template(): ",
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
ndims);
// 5D input has a leading batch dimension; shift all dim indices by one.
if (ndims == 5) {
numBatch = input.size(0);
planeDim++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t numPlanes = input.size(planeDim);
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
// Every pooling window must fit inside the input along each dimension.
TORCH_CHECK(
outputT + poolSizeT - 1 < inputT,
"fractional_max_pool3d_out_cuda_template(): ",
"pool time (", poolSizeT, ") too large relative to input time (",
inputT, ")");
TORCH_CHECK(
outputH + poolSizeH - 1 < inputH,
"fractional_max_pool3d_out_cuda_template(): ",
"pool height (", poolSizeH, ") too large relative to input height (",
inputH, ")");
TORCH_CHECK(
outputW + poolSizeW - 1 < inputW,
"fractional_max_pool3d_out_cuda_template(): ",
"pool width (", poolSizeW, ") too large relative to input width (",
inputW, ")");
if (ndims == 4) {
/* resize output */
output.resize_({numPlanes, outputT, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numPlanes, outputT, outputH, outputW});
} else {
/* resize output */
output.resize_({numBatch, numPlanes, outputT, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numBatch, numPlanes, outputT, outputH, outputW});
}
// The kernel always indexes 5D accessors; add a fake batch dim of 1 for
// the unbatched case.
auto output_ = output;
auto indices_ = indices;
auto input_ = input;
if(ndims == 4) {
output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW});
input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW});
}
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = output_.size(2) *
output_.size(3) * output_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"fractional_max_pool3d_out_frame",
[&]{
fractional_max_pool3d_out_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
input_.packed_accessor64<scalar_t, 5>(),
output_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>(),
randomSamples.packed_accessor64<scalar_t, 3>(),
poolSizeT, poolSizeH, poolSizeW
);
}
);
// Surface launch-configuration errors from the asynchronous launch.
AT_CUDA_CHECK(cudaGetLastError());
}
// Host driver for the backward pass (CUDA original): checks gradOutput against
// output_size, zero-fills gradInput, normalizes to 5D, and scatters gradients
// through the saved indices.
void fractional_max_pool3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef pool_size /* unused */,
IntArrayRef output_size,
const Tensor& indices) {
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t outputT = output_size[0];
int64_t outputH = output_size[1];
int64_t outputW = output_size[2];
int64_t ndims = input.ndimension();
if (ndims == 5) {
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
TORCH_CHECK(
outputT == gradOutput.size(dimt),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput time unexpected"
);
TORCH_CHECK(
outputH == gradOutput.size(dimh),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput height unexpected"
);
TORCH_CHECK(
outputW == gradOutput.size(dimw),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput width unexpected"
);
/* resize */
// gradInput starts at zero; the kernel accumulates into it atomically.
gradInput.resize_as_(input);
gradInput.zero_();
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
if(ndims == 4) {
gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT,
inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT,
outputH, outputW});
indices_ = indices_.reshape({1, indices.size(0), outputT, outputH,
outputW});
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3) * gradOutput_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
gradOutput.scalar_type(),
"fractional_max_pool3d_backward_out_frame",
[&] {
fractional_max_pool3d_backward_out_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
gradInput_.packed_accessor64<scalar_t, 5>(),
gradOutput_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>()
);
}
);
// Surface launch-configuration errors from the asynchronous launch.
AT_CUDA_CHECK(cudaGetLastError());
}
}// namespace
// Out-variant entry point: fills caller-provided output/indices and returns
// references to them.
std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda(
at::Tensor& output,
at::Tensor& indices,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& randomSamples) {
fractional_max_pool3d_out_cuda_template(
output,
indices,
input,
pool_size,
output_size,
randomSamples
);
return std::tuple<Tensor&, Tensor&>(output, indices);
}
// Functional entry point: allocates output and int64 indices tensors (resized
// by the template) and returns them by value.
std::tuple<Tensor, Tensor> fractional_max_pool3d_cuda(
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& randomSamples) {
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
fractional_max_pool3d_out_cuda_template(
output,
indices,
input,
pool_size,
output_size,
randomSamples
);
return std::tuple<Tensor, Tensor>(output, indices);
}
// Out-variant backward entry point: accumulates into the caller-provided
// gradInput (resized and zeroed by the template) and returns it.
Tensor& fractional_max_pool3d_backward_out_cuda(
at::Tensor& gradInput,
const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices) {
fractional_max_pool3d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
pool_size,
output_size,
indices
);
return gradInput;
}
// Functional backward entry point: allocates gradInput and delegates to the
// shared template.
Tensor fractional_max_pool3d_backward_cuda(
const at::Tensor& gradOutput,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices) {
Tensor gradInput = at::empty({0}, input.options());
fractional_max_pool3d_backward_out_cuda_template(
gradInput,
gradOutput,
input,
pool_size,
output_size,
indices
);
return gradInput;
}
}// native
}// at
|
85fa8480c85e8b99062b6e0d97464f027af114a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
<% unless type_name == 'robject' %>
/* Element-wise power kernels (ERB template, hipified).  Pointers are raw byte
 * buffers; s1/s2/s3 are strides in bytes, so each thread walks a grid-stride
 * loop and casts the addressed bytes to dtype. */
__global__ void <%="cumo_#{c_iter}_kernel"%>(char *p1, char *p2, char *p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
*(dtype*)(p3 + (i * s3)) = m_pow(*(dtype*)(p1 + (i * s1)), *(dtype*)(p2 + (i * s2)));
}
}
/* Variant with an int32 exponent operand (p2 is read as int32_t). */
__global__ void <%="cumo_#{c_iter}_int32_kernel"%>(char *p1, char *p2, char *p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
*(dtype*)(p3 + (i * s3)) = m_pow_int(*(dtype*)(p1 + (i * s1)), *(int32_t*)(p2 + (i * s2)));
}
}
/* Host-side launchers for the pow kernels above.
 * BUG FIX (hipify artifact): the automated port had spliced the
 * hipLaunchKernelGGL macro into the middle of the
 * `size_t blockDim = get_blockDim(n);` statement in both wrappers, producing
 * uncompilable code.  The statement is restored and the launch is expressed as
 * a well-formed hipLaunchKernelGGL invocation with the same grid/block/stream
 * arguments the original CUDA `<<<gridDim, blockDim>>>` launch used. */
void <%="cumo_#{c_iter}_kernel_launch"%>(char *p1, char *p2, char *p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
    size_t gridDim = get_gridDim(n);
    size_t blockDim = get_blockDim(n);
    hipLaunchKernelGGL((<%="cumo_#{c_iter}_kernel"%>), dim3(gridDim), dim3(blockDim), 0, 0, p1,p2,p3,s1,s2,s3,n);
}
void <%="cumo_#{c_iter}_int32_kernel_launch"%>(char *p1, char *p2, char *p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
    size_t gridDim = get_gridDim(n);
    size_t blockDim = get_blockDim(n);
    hipLaunchKernelGGL((<%="cumo_#{c_iter}_int32_kernel"%>), dim3(gridDim), dim3(blockDim), 0, 0, p1,p2,p3,s1,s2,s3,n);
}
<% end %>
| 85fa8480c85e8b99062b6e0d97464f027af114a4.cu | <% unless type_name == 'robject' %>
/* Element-wise power kernels (ERB template, CUDA original).  Pointers are raw
 * byte buffers with byte strides s1/s2/s3; a grid-stride loop covers all n
 * elements regardless of launch size. */
__global__ void <%="cumo_#{c_iter}_kernel"%>(char *p1, char *p2, char *p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
*(dtype*)(p3 + (i * s3)) = m_pow(*(dtype*)(p1 + (i * s1)), *(dtype*)(p2 + (i * s2)));
}
}
/* Variant with an int32 exponent operand (p2 is read as int32_t). */
__global__ void <%="cumo_#{c_iter}_int32_kernel"%>(char *p1, char *p2, char *p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
*(dtype*)(p3 + (i * s3)) = m_pow_int(*(dtype*)(p1 + (i * s1)), *(int32_t*)(p2 + (i * s2)));
}
}
/* Host wrapper: derives a 1-D launch configuration from n and starts the
 * element-wise pow kernel on the default stream. */
void <%="cumo_#{c_iter}_kernel_launch"%>(char *p1, char *p2, char *p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
size_t gridDim = get_gridDim(n);
size_t blockDim = get_blockDim(n);
<%="cumo_#{c_iter}_kernel"%><<<gridDim, blockDim>>>(p1,p2,p3,s1,s2,s3,n);
}
/* Same, for the int32-exponent variant. */
void <%="cumo_#{c_iter}_int32_kernel_launch"%>(char *p1, char *p2, char *p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n)
{
size_t gridDim = get_gridDim(n);
size_t blockDim = get_blockDim(n);
<%="cumo_#{c_iter}_int32_kernel"%><<<gridDim, blockDim>>>(p1,p2,p3,s1,s2,s3,n);
}
<% end %>
|
52d7fde2d10859844f8dcd25d8c1e8bc44f01779.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergebicgstab3.cu normal z -> s, Tue Feb 9 16:05:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from smergebicgstab into one
// The difference to smergedbicgstab2 is that the SpMV is not merged into the
// kernes. This results in higher flexibility at the price of lower performance.
/* -------------------------------------------------------------------------- */
// Element-wise fused update p = r + beta * ( p - omega * v ).
// Scalar parameters live on the device in skp: skp[1] = beta, skp[2] = omega.
__global__ void
magma_sbicgmerge1_kernel(
    int n,
    float * skp,
    float * v,
    float * r,
    float * p )
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= n )
        return;
    const float beta  = skp[1];
    const float omega = skp[2];
    p[idx] = r[idx] + beta * ( p[idx] - omega * v[idx] );
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
p = beta*p
p = p-omega*beta*v
p = p+r
-> p = r + beta * ( p - omega * v )
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaFloat_ptr
set of scalar parameters
@param[in]
v magmaFloat_ptr
input vector v
@param[in]
r magmaFloat_ptr
input vector r
@param[in,out]
p magmaFloat_ptr
input/output vector p
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host launcher for magma_sbicgmerge1_kernel: one thread per vector entry,
// BLOCK_SIZE threads per block, on the queue's CUDA stream.
extern "C" magma_int_t
magma_sbicgmerge1(
magma_int_t n,
magmaFloat_ptr skp,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_sbicgmerge1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , n, skp, v, r, p );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// Element-wise fused update s = r - alpha * v, with alpha read from the
// device-side scalar vector skp[0].
__global__ void
magma_sbicgmerge2_kernel(
    int n,
    float * skp,
    float * r,
    float * v,
    float * s )
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= n )
        return;
    s[idx] = r[idx] - skp[0] * v[idx];
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
s=r
s=s-alpha*v
-> s = r - alpha * v
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaFloat_ptr
set of scalar parameters
@param[in]
r magmaFloat_ptr
input vector r
@param[in]
v magmaFloat_ptr
input vector v
@param[out]
s magmaFloat_ptr
output vector s
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host launcher for magma_sbicgmerge2_kernel: one thread per vector entry on
// the queue's CUDA stream.
extern "C" magma_int_t
magma_sbicgmerge2(
magma_int_t n,
magmaFloat_ptr skp,
magmaFloat_ptr r,
magmaFloat_ptr v,
magmaFloat_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_sbicgmerge2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , n, skp, r, v, s );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// Fused BiCGSTAB update: x = x + alpha * p + omega * s and r = s - omega * t,
// with alpha = skp[0], omega = skp[2].  The `s` vector is passed as `se` to
// avoid shadowing the local scalar.
__global__ void
magma_sbicgmerge3_kernel(
int n,
float * skp,
float * p,
float * se,
float * t,
float * x,
float * r
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float alpha=skp[0];
float omega=skp[2];
if ( i<n ) {
// Load s[i] once; it feeds both the x and r updates.
float s;
s = se[i];
x[i] = x[i] + alpha * p[i] + omega * s;
r[i] = s - omega * t[i];
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x=x+alpha*p
x=x+omega*s
r=s
r=r-omega*t
-> x = x + alpha * p + omega * s
-> r = s - omega * t
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaFloat_ptr
set of scalar parameters
@param[in]
p magmaFloat_ptr
input p
@param[in]
s magmaFloat_ptr
input s
@param[in]
t magmaFloat_ptr
input t
@param[in,out]
x magmaFloat_ptr
input/output x
@param[in,out]
r magmaFloat_ptr
input/output r
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host launcher for magma_sbicgmerge3_kernel: one thread per vector entry on
// the queue's CUDA stream.
extern "C" magma_int_t
magma_sbicgmerge3(
magma_int_t n,
magmaFloat_ptr skp,
magmaFloat_ptr p,
magmaFloat_ptr s,
magmaFloat_ptr t,
magmaFloat_ptr x,
magmaFloat_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_sbicgmerge3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , n, skp, p, s, t, x, r );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// Single-thread scalar-update kernels for the BiCGSTAB parameter vector skp.
// Each is launched with one block of one thread; the i==0 guard keeps only
// the first thread active.
// Step 1: alpha = rho_new / (r_hat . v), i.e. skp[0] = skp[4] / skp[0].
__global__ void
magma_sbicgmerge4_kernel_1(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
float tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
// Step 2: omega = skp[6]/skp[7]; also save rho into skp[3] for the next
// iteration.
__global__ void
magma_sbicgmerge4_kernel_2(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
// Step 3: beta = (rho_new/rho_old) * (alpha/omega).
__global__ void
magma_sbicgmerge4_kernel_3(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
float tmp1 = skp[4]/skp[3];
float tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
//skp[1] = skp[4]/skp[3] * skp[0] / skp[2];
}
}
/**
Purpose
-------
Performs some parameter operations for the BiCGSTAB with scalars on GPU.
Arguments
---------
@param[in]
type int
kernel type
@param[in,out]
skp magmaFloat_ptr
vector with parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
// Selects and launches one of the three scalar-update kernels above on a
// 1x1 grid; `type` in {1, 2, 3} picks the update, anything else only prints
// a diagnostic (still returns MAGMA_SUCCESS).
extern "C" magma_int_t
magma_sbicgmerge4(
magma_int_t type,
magmaFloat_ptr skp,
magma_queue_t queue )
{
dim3 Bs( 1 );
dim3 Gs( 1 );
if ( type == 1 )
hipLaunchKernelGGL(( magma_sbicgmerge4_kernel_1), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , skp );
else if ( type == 2 )
hipLaunchKernelGGL(( magma_sbicgmerge4_kernel_2), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , skp );
else if ( type == 3 )
hipLaunchKernelGGL(( magma_sbicgmerge4_kernel_3), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , skp );
else
printf("error: no kernel called\n");
return MAGMA_SUCCESS;
}
| 52d7fde2d10859844f8dcd25d8c1e8bc44f01779.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergebicgstab3.cu normal z -> s, Tue Feb 9 16:05:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from smergebicgstab into one
// The difference to smergedbicgstab2 is that the SpMV is not merged into the
// kernes. This results in higher flexibility at the price of lower performance.
/* -------------------------------------------------------------------------- */
// CUDA original: p = r + beta * ( p - omega * v ), beta = skp[1],
// omega = skp[2]; one thread per entry.
__global__ void
magma_sbicgmerge1_kernel(
int n,
float * skp,
float * v,
float * r,
float * p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float beta=skp[1];
float omega=skp[2];
if ( i<n ) {
p[i] = r[i] + beta * ( p[i] - omega * v[i] );
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
p = beta*p
p = p-omega*beta*v
p = p+r
-> p = r + beta * ( p - omega * v )
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaFloat_ptr
set of scalar parameters
@param[in]
v magmaFloat_ptr
input vector v
@param[in]
r magmaFloat_ptr
input vector r
@param[in,out]
p magmaFloat_ptr
input/output vector p
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbicgmerge1(
magma_int_t n,
magmaFloat_ptr skp,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
magma_sbicgmerge1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( n, skp, v, r, p );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_sbicgmerge2_kernel(
int n,
float * skp,
float * r,
float * v,
float * s )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float alpha=skp[0];
if ( i < n ) {
s[i] = r[i] - alpha * v[i];
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
s=r
s=s-alpha*v
-> s = r - alpha * v
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaFloat_ptr
set of scalar parameters
@param[in]
r magmaFloat_ptr
input vector r
@param[in]
v magmaFloat_ptr
input vector v
@param[out]
s magmaFloat_ptr
output vector s
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbicgmerge2(
magma_int_t n,
magmaFloat_ptr skp,
magmaFloat_ptr r,
magmaFloat_ptr v,
magmaFloat_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
magma_sbicgmerge2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( n, skp, r, v, s );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_sbicgmerge3_kernel(
int n,
float * skp,
float * p,
float * se,
float * t,
float * x,
float * r
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float alpha=skp[0];
float omega=skp[2];
if ( i<n ) {
float s;
s = se[i];
x[i] = x[i] + alpha * p[i] + omega * s;
r[i] = s - omega * t[i];
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x=x+alpha*p
x=x+omega*s
r=s
r=r-omega*t
-> x = x + alpha * p + omega * s
-> r = s - omega * t
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaFloat_ptr
set of scalar parameters
@param[in]
p magmaFloat_ptr
input p
@param[in]
s magmaFloat_ptr
input s
@param[in]
t magmaFloat_ptr
input t
@param[in,out]
x magmaFloat_ptr
input/output x
@param[in,out]
r magmaFloat_ptr
input/output r
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbicgmerge3(
magma_int_t n,
magmaFloat_ptr skp,
magmaFloat_ptr p,
magmaFloat_ptr s,
magmaFloat_ptr t,
magmaFloat_ptr x,
magmaFloat_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
magma_sbicgmerge3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( n, skp, p, s, t, x, r );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_sbicgmerge4_kernel_1(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
float tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
__global__ void
magma_sbicgmerge4_kernel_2(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
__global__ void
magma_sbicgmerge4_kernel_3(
float * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
float tmp1 = skp[4]/skp[3];
float tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
//skp[1] = skp[4]/skp[3] * skp[0] / skp[2];
}
}
/**
Purpose
-------
Performs some parameter operations for the BiCGSTAB with scalars on GPU.
Arguments
---------
@param[in]
type int
kernel type
@param[in,out]
skp magmaFloat_ptr
vector with parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbicgmerge4(
magma_int_t type,
magmaFloat_ptr skp,
magma_queue_t queue )
{
dim3 Bs( 1 );
dim3 Gs( 1 );
if ( type == 1 )
magma_sbicgmerge4_kernel_1<<< Gs, Bs, 0, queue->cuda_stream() >>>( skp );
else if ( type == 2 )
magma_sbicgmerge4_kernel_2<<< Gs, Bs, 0, queue->cuda_stream() >>>( skp );
else if ( type == 3 )
magma_sbicgmerge4_kernel_3<<< Gs, Bs, 0, queue->cuda_stream() >>>( skp );
else
printf("error: no kernel called\n");
return MAGMA_SUCCESS;
}
|
d1b916104f65ed4c5123a481370d18c4ffaf1fe8.hip | // !!! This is a file automatically generated by hipify!!!
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include<cuda.h>
#define n 5
__global__ void add(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
int main(){
int a[n];
int b[n];
int i;
int c[n]= {0};
int* dev_a;
int* dev_b;
int* dev_c;
hipMalloc((void**)&dev_c, n * sizeof(int));
hipMalloc((void**)&dev_a, n * sizeof(int));
hipMalloc((void**)&dev_b, n * sizeof(int));
printf("\narray elements (1st):\n");
for(i=0;i<n;i++){
scanf("%d",&a[i]);
}
printf("\narray elements (2nd):\n");
for(i=0;i<n;i++){
scanf("%d",&b[i]);
}
hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, n* sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(n),dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, n* sizeof(int), hipMemcpyDeviceToHost);
printf("\nsum is\n");
for(i=0;i<n;i++){
printf("%d\n",c[i]);
}
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
hipDeviceReset();
return 0;
}
| d1b916104f65ed4c5123a481370d18c4ffaf1fe8.cu | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include<cuda.h>
#define n 5
__global__ void add(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
int main(){
int a[n];
int b[n];
int i;
int c[n]= {0};
int* dev_a;
int* dev_b;
int* dev_c;
cudaMalloc((void**)&dev_c, n * sizeof(int));
cudaMalloc((void**)&dev_a, n * sizeof(int));
cudaMalloc((void**)&dev_b, n * sizeof(int));
printf("\narray elements (1st):\n");
for(i=0;i<n;i++){
scanf("%d",&a[i]);
}
printf("\narray elements (2nd):\n");
for(i=0;i<n;i++){
scanf("%d",&b[i]);
}
cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, n* sizeof(int), cudaMemcpyHostToDevice);
add<<<n,1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, n* sizeof(int), cudaMemcpyDeviceToHost);
printf("\nsum is\n");
for(i=0;i<n;i++){
printf("%d\n",c[i]);
}
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaDeviceReset();
return 0;
}
|
0a4f17d4bf20a0b37e2d8d66942c24250c746db0.hip | // !!! This is a file automatically generated by hipify!!!
#include "cfd.h"
#include <cmath>
int main(){
hipSetDevice(1);
hipDeviceSynchronize();
CFD cfd = CFD(10000, 10, 10);
float maxTime = 1;
cfd.step(maxTime);
return 0;
} | 0a4f17d4bf20a0b37e2d8d66942c24250c746db0.cu | #include "cfd.h"
#include <cmath>
int main(){
cudaSetDevice(1);
cudaDeviceSynchronize();
CFD cfd = CFD(10000, 10, 10);
float maxTime = 1;
cfd.step(maxTime);
return 0;
} |
c29aa1c4ec2d0bf63b39bcb75c045522813abed8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void imageblur( int* inputImage, int* outputImage, int filterSize, double* filter, int imageRow, int imageCol){
int pixelx = blockIdx.x * blockDim.x + threadIdx.x;
int pixely = blockIdx.y * blockDim.y + threadIdx.y;
double blur_value = 0.0;
if (pixelx >= imageCol || pixely >= imageRow) {
return;
}
//multiply with blur kernel
for (int finalx = 0; finalx < filterSize; finalx++) {
for (int finaly = 0; finaly < filterSize; finaly++) {
int imagex = pixelx + finalx - filterSize / 2 ;
int imagey = pixely + finaly - filterSize / 2;
int imagePixel;
if(imagex < 0 || imagex >= imageCol || imagey < 0 || imagey >= imageRow){
imagePixel = 0;
} else {
imagePixel = inputImage[imagey*imageCol+imagex];
}
blur_value += (filter[finaly*filterSize+finalx] * imagePixel);
}
}
outputImage[pixely*imageCol+pixelx] = (int)(blur_value/15.0);
}
int main(int argc, char const *argv[]) {
int imagex = 3, imagey = 3;
int numberOfPixels = imagex*imagey*sizeof(int);
int *d_image = 0; int *d_blurImage = 0; double *d_filter = 0; //device
int *h_image = 0; int *h_blurImage = 0; double *h_filter = 0; //host
//malloc memory device and host
h_image = (int*)malloc(numberOfPixels);
hipMalloc((void**)&d_image, numberOfPixels);
h_blurImage = (int*)malloc(numberOfPixels);
hipMalloc((void**)&d_blurImage, numberOfPixels);
h_filter = (double*)malloc(9*sizeof(double));
hipMalloc((void**)&d_filter, 9*sizeof(double));
if(h_image == 0 || d_image == 0 || h_blurImage == 0 || d_blurImage == 0){
printf("Could not allocate memory");
return 1;
}
//Initialise Filter
h_filter[0] = 1.0; h_filter[1] = 2.0; h_filter[2] = 1.0;
h_filter[3] = 2.0; h_filter[4] = 3.0; h_filter[5] = 2.0;
h_filter[6] = 1.0; h_filter[7] = 2.0; h_filter[8] = 1.0;
// Randomly Initialize Image
srand(time(NULL));
for(int i = 0; i < (imagex*imagey); i++){
h_image[i] = (rand() % 256);
}
//Copy host memory to device
hipMemcpy( d_image, h_image, numberOfPixels, hipMemcpyHostToDevice);
hipMemcpy( d_filter, h_filter, 9*sizeof(double), hipMemcpyHostToDevice);
const dim3 blockSize(4,4,1);
const dim3 gridSize(imagex/blockSize.x+1,imagey/blockSize.y+1,1);
//Call
hipLaunchKernelGGL(( imageblur), dim3(gridSize), dim3(blockSize), 0, 0, d_image, d_blurImage, 3, d_filter, imagey, imagex);
//copy blurred image to host
hipMemcpy(h_blurImage, d_blurImage, numberOfPixels, hipMemcpyDeviceToHost);
printf("Image : \n");
for(int i = 0; i < imagex; i++){
for(int j = 0; j < imagey; j++){
printf("%d ", h_image[i*imagex + j]);
}
printf("\n");
}
printf("Blur Image: \n");
for(int i = 0; i < imagex; i++){
for(int j = 0; j < imagey; j++){
printf("%d ", h_blurImage[i*imagex + j]);
}
printf("\n");
}
//Clean Memory
free(h_image); free(h_blurImage); free(h_filter);
hipFree(d_image); hipFree(d_blurImage); hipFree(d_filter);
return 0;
}
| c29aa1c4ec2d0bf63b39bcb75c045522813abed8.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void imageblur( int* inputImage, int* outputImage, int filterSize, double* filter, int imageRow, int imageCol){
int pixelx = blockIdx.x * blockDim.x + threadIdx.x;
int pixely = blockIdx.y * blockDim.y + threadIdx.y;
double blur_value = 0.0;
if (pixelx >= imageCol || pixely >= imageRow) {
return;
}
//multiply with blur kernel
for (int finalx = 0; finalx < filterSize; finalx++) {
for (int finaly = 0; finaly < filterSize; finaly++) {
int imagex = pixelx + finalx - filterSize / 2 ;
int imagey = pixely + finaly - filterSize / 2;
int imagePixel;
if(imagex < 0 || imagex >= imageCol || imagey < 0 || imagey >= imageRow){
imagePixel = 0;
} else {
imagePixel = inputImage[imagey*imageCol+imagex];
}
blur_value += (filter[finaly*filterSize+finalx] * imagePixel);
}
}
outputImage[pixely*imageCol+pixelx] = (int)(blur_value/15.0);
}
int main(int argc, char const *argv[]) {
int imagex = 3, imagey = 3;
int numberOfPixels = imagex*imagey*sizeof(int);
int *d_image = 0; int *d_blurImage = 0; double *d_filter = 0; //device
int *h_image = 0; int *h_blurImage = 0; double *h_filter = 0; //host
//malloc memory device and host
h_image = (int*)malloc(numberOfPixels);
cudaMalloc((void**)&d_image, numberOfPixels);
h_blurImage = (int*)malloc(numberOfPixels);
cudaMalloc((void**)&d_blurImage, numberOfPixels);
h_filter = (double*)malloc(9*sizeof(double));
cudaMalloc((void**)&d_filter, 9*sizeof(double));
if(h_image == 0 || d_image == 0 || h_blurImage == 0 || d_blurImage == 0){
printf("Could not allocate memory");
return 1;
}
//Initialise Filter
h_filter[0] = 1.0; h_filter[1] = 2.0; h_filter[2] = 1.0;
h_filter[3] = 2.0; h_filter[4] = 3.0; h_filter[5] = 2.0;
h_filter[6] = 1.0; h_filter[7] = 2.0; h_filter[8] = 1.0;
// Randomly Initialize Image
srand(time(NULL));
for(int i = 0; i < (imagex*imagey); i++){
h_image[i] = (rand() % 256);
}
//Copy host memory to device
cudaMemcpy( d_image, h_image, numberOfPixels, cudaMemcpyHostToDevice);
cudaMemcpy( d_filter, h_filter, 9*sizeof(double), cudaMemcpyHostToDevice);
const dim3 blockSize(4,4,1);
const dim3 gridSize(imagex/blockSize.x+1,imagey/blockSize.y+1,1);
//Call
imageblur<<<gridSize, blockSize>>>(d_image, d_blurImage, 3, d_filter, imagey, imagex);
//copy blurred image to host
cudaMemcpy(h_blurImage, d_blurImage, numberOfPixels, cudaMemcpyDeviceToHost);
printf("Image : \n");
for(int i = 0; i < imagex; i++){
for(int j = 0; j < imagey; j++){
printf("%d ", h_image[i*imagex + j]);
}
printf("\n");
}
printf("Blur Image: \n");
for(int i = 0; i < imagex; i++){
for(int j = 0; j < imagey; j++){
printf("%d ", h_blurImage[i*imagex + j]);
}
printf("\n");
}
//Clean Memory
free(h_image); free(h_blurImage); free(h_filter);
cudaFree(d_image); cudaFree(d_blurImage); cudaFree(d_filter);
return 0;
}
|
ef353ad9e6cfdcca5bc78d83e016ee174ff970dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "common/fmt.hpp"
#include "common/utils.hpp"
#include "hiprand/hiprand_kernel.h"
struct Vec {
double x, y, z; // position, also color (r,g,b)
__device__ __host__ Vec operator+(const Vec &b) const {
Vec v;
v.x = x+b.x;
v.y = y+b.y;
v.z = z+b.z;
return v;
}
__device__ __host__ Vec operator-(const Vec &b) const {
Vec v;
v.x = x - b.x;
v.y = y - b.y;
v.z = z - b.z;
return v;
/* return Vec(x-b.x,y-b.y,z-b.z); */
}
__device__ __host__ Vec operator*(double b) const {
Vec v;
v.x = x * b;
v.y = y * b;
v.z = z * b;
return v;
/* return Vec(x*b,y*b,z*b); */
}
__device__ __host__ Vec operator%(Vec&b){
Vec v;
v.x = y * b.z - z * b.y;
v.y = z * b.x - x * b.z;
v.z = x * b.y - y * b.x;
return v;
/* return Vec(y*b.z-z*b.y,z*b.x-x*b.z,x*b.y-y*b.x); */
}
__device__ __host__ Vec mult(const Vec &b) const {
Vec v;
v.x = x * b.x;
v.y = y * b.y;
v.z = z * b.z;
return v;
/* return Vec(x*b.x,y*b.y,z*b.z); */
}
__device__ __host__ Vec& norm() { return *this = *this * (1/sqrt(x*x+y*y+z*z)); }
__device__ __host__ double dot(const Vec &b) const { return x*b.x+y*b.y+z*b.z; } // cross:
};
struct Ray { Vec o, d; };
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance()
struct Sphere {
double rad; // radius
Vec p, e, c; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ __host__ double intersect(const Ray &r) const { // returns distance, 0 if nohit
Vec op = p-r.o; // Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
double t, eps=1e-4, b=op.dot(r.d), det=b*b-op.dot(op)+rad*rad;
if (det<0) return 0; else det=sqrt(det);
return (t=b-det)>eps ? t : ((t=b+det)>eps ? t : 0);
}
};
__device__ __host__ Vec new_vec(double x_=0, double y_=0, double z_=0) {
Vec v;
v.x = x_;
v.y = y_;
v.z = z_;
return v;
}
__device__ __host__ Ray new_ray(Vec o_, Vec d_) {
Ray r;
r.o = o_;
r.d = d_;
return r;
}
__device__ __host__ Sphere new_sphere(double rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) {
Sphere s;
s.rad = rad_;
s.p = p_;
s.e = e_;
s.c = c_;
s.refl = refl_;
return s;
}
// CUDA FUNCTIONS ===========================================================
#define NUM_SPHERES 9
static __constant__ Sphere SPHERES[NUM_SPHERES];
__device__ __host__ inline double clamp(double x) {
return x<0 ? 0 : x>1 ? 1 : x;
}
int toInt(double x) {
return int(pow(clamp(x),1/2.2)*255+.5);
}
__device__ bool intersect(const Ray &r, double &t, int &id) {
int n = NUM_SPHERES;
double d;
double inf = t = 1e20;
for(int i = int(n); i--;)
if( (d = SPHERES[i].intersect(r)) && d<t ) {
t=d;
id=i;
}
return t < inf;
}
#define STACK_SIZE 100
__device__ Vec linear_radiance(const Ray &r_, int depth_, hiprandState_t *Xi){
double t; // distance to intersection
int id=0; // id of intersected object
Ray r=r_;
int depth=depth_;
// L0 = Le0 + f0*(L1)
// = Le0 + f0*(Le1 + f1*L2)
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(L3))
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(Le3 + f3*(L4)))
// = ...
// = Le0 + f0*Le1 + f0*f1*Le2 + f0*f1*f2*Le3 + f0*f1*f2*f3*Le4 + ...
//
// So:
// F = 1
// while (1){
// L += F*Lei
// F *= fi
// }
Vec cl = new_vec(0,0,0); // accumulated color
Vec cf = new_vec(1,1,1); // accumulated reflectance
while (1){
if (!intersect(r, t, id)) return cl; // if miss, return black
const Sphere &obj = SPHERES[id]; // the hit object
Vec x=r.o+r.d*t, n=(x-obj.p).norm(), nl=n.dot(r.d)<0?n:n*-1, f=obj.c;
double p = f.x>f.y && f.x>f.z ? f.x : f.y>f.z ? f.y : f.z; // max refl
cl = cl + cf.mult(obj.e);
if (++depth>5) if (hiprand_uniform(Xi)<p) f=f*(1/p); else return cl; //R.R.
cf = cf.mult(f);
if (obj.refl == DIFF){ // Ideal DIFFUSE reflection
double r1=2*M_PI*hiprand_uniform(Xi), r2=hiprand_uniform(Xi), r2s=sqrt(r2);
Vec w=nl, u=((fabs(w.x)>.1? new_vec(0,1):new_vec(1))%w).norm(), v=w%u;
Vec d = (u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrt(1-r2)).norm();
//return obj.e + f.mult(radiance(Ray(x,d),depth,Xi));
r = new_ray(x,d);
continue;
} else if (obj.refl == SPEC){ // Ideal SPECULAR reflection
//return obj.e + f.mult(radiance(Ray(x,r.d-n*2*n.dot(r.d)),depth,Xi));
r = new_ray(x,r.d-n*2*n.dot(r.d));
continue;
}
Ray reflRay = new_ray(x, r.d-n*2*n.dot(r.d)); // Ideal dielectric REFRACTION
bool into = n.dot(nl)>0; // Ray from outside going in?
double nc=1, nt=1.5, nnt=into?nc/nt:nt/nc, ddn=r.d.dot(nl), cos2t;
if ((cos2t=1-nnt*nnt*(1-ddn*ddn))<0){ // Total internal reflection
//return obj.e + f.mult(radiance(reflRay,depth,Xi));
r = reflRay;
continue;
}
Vec tdir = (r.d*nnt - n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm();
double a=nt-nc, b=nt+nc, R0=a*a/(b*b), c = 1-(into?-ddn:tdir.dot(n));
double Re=R0+(1-R0)*c*c*c*c*c,Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP=Tr/(1-P);
// return obj.e + f.mult(hiprand_uniform(Xi)<P ?
// radiance(reflRay, depth,Xi)*RP:
// radiance(Ray(x,tdir),depth,Xi)*TP);
if (hiprand_uniform(Xi)<P){
cf = cf*RP;
r = reflRay;
} else {
cf = cf*TP;
r = new_ray(x,tdir);
}
continue;
}
}
__global__ void calc_pixel(Vec *out, int samps) {
// Calculates a single pixel in the final image
// Returns a color vector that is later written to the final image.
hiprandState_t state;
int w=1024, h=768;
Ray cam = new_ray(new_vec(50,52,295.6), new_vec(0,-0.042612,-1).norm()); // cam pos, dir
Vec cx = new_vec(w*.5135/h), cy = (cx%cam.d).norm()*.5135;
int t = blockIdx.x * blockDim.x + threadIdx.x;
int y = t/w;
int x = t%w;
hiprand_init(t, t, 0, &state);
//skipahead(y*y*y, &state);
if (t < w*h) {
int i = (h-y-1) * w + x;
Vec res = out[i];
for (int sy = 0; sy < 2; sy++) { // 2x2 subpixel rows
for (int sx = 0; sx < 2; sx++) { // 2x2 subpixel cols
Vec r = new_vec();
for (int s = 0; s < samps; s++) {
double r1 = 2*hiprand_uniform(&state), dx=r1<1 ? sqrt(r1)-1: 1-sqrt(2-r1);
double r2 = 2*hiprand_uniform(&state), dy=r2<1 ? sqrt(r2)-1: 1-sqrt(2-r2);
Vec d = cx*( ( (sx+.5 + dx)/2 + x)/w - .5) +
cy*( ( (sy+.5 + dy)/2 + y)/h - .5) + cam.d;
d = d.norm();
Vec res = linear_radiance(new_ray(cam.o+d*140,d),0, &state);
r = r + res * (1./samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
out[i] = out[i] + new_vec(clamp(r.x),clamp(r.y),clamp(r.z))*.25;
}
}
}
}
int main(int argc, char *argv[]) {
float BLOCK_SIZE = 512;
timer_start("Starting program."); //@@ start a timer
Sphere *spheres = (Sphere *)malloc(NUM_SPHERES * sizeof(Sphere));
spheres[0] = new_sphere(1e5, new_vec( 1e5+1,40.8,81.6), new_vec(),new_vec(.75,.25,.25),DIFF);//Left
spheres[1] = new_sphere(1e5, new_vec(-1e5+99,40.8,81.6), new_vec(),new_vec(.25,.25,.75),DIFF);//Rght
spheres[2] = new_sphere(1e5, new_vec(50,40.8, 1e5), new_vec(),new_vec(.75,.75,.75),DIFF);//Back
spheres[3] = new_sphere(1e5, new_vec(50,40.8,-1e5+170), new_vec(),new_vec(), DIFF);//Frnt
spheres[4] = new_sphere(1e5, new_vec(50, 1e5, 81.6), new_vec(),new_vec(.75,.75,.75),DIFF);//Botm
spheres[5] = new_sphere(1e5, new_vec(50,-1e5+81.6,81.6), new_vec(),new_vec(.75,.75,.75),DIFF);//Top
spheres[6] = new_sphere(16.5, new_vec(27,16.5,47), new_vec(),new_vec(1,1,1)*.999, SPEC);//Mirr
spheres[7] = new_sphere(16.5, new_vec(73,16.5,78), new_vec(),new_vec(1,1,1)*.999, REFR);//Glas
spheres[8] = new_sphere(600, new_vec(50,681.6-.27,81.6), new_vec(12,12,12), new_vec(), DIFF);//Lite
// Copy the spheres to constant memory
hipMemcpyToSymbol(SPHERES, spheres, NUM_SPHERES * sizeof(Sphere));
int w=1024, h=768; // # samples
int samps = argc==2 ? atoi(argv[1])/4 : 250;
Vec *host_out = (Vec *)malloc(sizeof(Vec) * w * h);
Vec *device_out;
hipMalloc((void **) &device_out, sizeof(Vec) * w * h);
printf("This is Chris's 1-D optimization.\n");
printf("Render starting!\nBlock size is %i\n", int(BLOCK_SIZE));
dim3 grid(ceil((w*h)/BLOCK_SIZE), 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( calc_pixel), dim3(grid), dim3(block), 0, 0, device_out, samps);
hipDeviceSynchronize();
hipEventRecord(stop);
hipMemcpy(host_out, device_out, sizeof(Vec) * w * h, hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
hipFree(device_out);
float milliseconds;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Image rendered in %f milliseconds!\n", milliseconds);
FILE *f = fopen("image.ppm", "w"); // Write image to PPM file.
fprintf(f, "P3\n%d %d\n%d\n", w, h, 255);
for (int i=0; i<w*h; i++)
fprintf(f,"%d %d %d ", toInt(host_out[i].x), toInt(host_out[i].y), toInt(host_out[i].z));
fclose(f);
free(host_out);
free(spheres);
timer_stop();
return 0;
}
| ef353ad9e6cfdcca5bc78d83e016ee174ff970dc.cu | #include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "common/fmt.hpp"
#include "common/utils.hpp"
#include "curand_kernel.h"
struct Vec {
double x, y, z; // position, also color (r,g,b)
__device__ __host__ Vec operator+(const Vec &b) const {
Vec v;
v.x = x+b.x;
v.y = y+b.y;
v.z = z+b.z;
return v;
}
__device__ __host__ Vec operator-(const Vec &b) const {
Vec v;
v.x = x - b.x;
v.y = y - b.y;
v.z = z - b.z;
return v;
/* return Vec(x-b.x,y-b.y,z-b.z); */
}
__device__ __host__ Vec operator*(double b) const {
Vec v;
v.x = x * b;
v.y = y * b;
v.z = z * b;
return v;
/* return Vec(x*b,y*b,z*b); */
}
__device__ __host__ Vec operator%(Vec&b){
Vec v;
v.x = y * b.z - z * b.y;
v.y = z * b.x - x * b.z;
v.z = x * b.y - y * b.x;
return v;
/* return Vec(y*b.z-z*b.y,z*b.x-x*b.z,x*b.y-y*b.x); */
}
__device__ __host__ Vec mult(const Vec &b) const {
Vec v;
v.x = x * b.x;
v.y = y * b.y;
v.z = z * b.z;
return v;
/* return Vec(x*b.x,y*b.y,z*b.z); */
}
__device__ __host__ Vec& norm() { return *this = *this * (1/sqrt(x*x+y*y+z*z)); }
__device__ __host__ double dot(const Vec &b) const { return x*b.x+y*b.y+z*b.z; } // cross:
};
struct Ray { Vec o, d; };
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance()
struct Sphere {
double rad; // radius
Vec p, e, c; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ __host__ double intersect(const Ray &r) const { // returns distance, 0 if nohit
Vec op = p-r.o; // Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
double t, eps=1e-4, b=op.dot(r.d), det=b*b-op.dot(op)+rad*rad;
if (det<0) return 0; else det=sqrt(det);
return (t=b-det)>eps ? t : ((t=b+det)>eps ? t : 0);
}
};
__device__ __host__ Vec new_vec(double x_=0, double y_=0, double z_=0) {
Vec v;
v.x = x_;
v.y = y_;
v.z = z_;
return v;
}
__device__ __host__ Ray new_ray(Vec o_, Vec d_) {
Ray r;
r.o = o_;
r.d = d_;
return r;
}
__device__ __host__ Sphere new_sphere(double rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) {
Sphere s;
s.rad = rad_;
s.p = p_;
s.e = e_;
s.c = c_;
s.refl = refl_;
return s;
}
// CUDA FUNCTIONS ===========================================================
#define NUM_SPHERES 9
static __constant__ Sphere SPHERES[NUM_SPHERES];
__device__ __host__ inline double clamp(double x) {
return x<0 ? 0 : x>1 ? 1 : x;
}
int toInt(double x) {
return int(pow(clamp(x),1/2.2)*255+.5);
}
__device__ bool intersect(const Ray &r, double &t, int &id) {
int n = NUM_SPHERES;
double d;
double inf = t = 1e20;
for(int i = int(n); i--;)
if( (d = SPHERES[i].intersect(r)) && d<t ) {
t=d;
id=i;
}
return t < inf;
}
#define STACK_SIZE 100
__device__ Vec linear_radiance(const Ray &r_, int depth_, curandState *Xi){
double t; // distance to intersection
int id=0; // id of intersected object
Ray r=r_;
int depth=depth_;
// L0 = Le0 + f0*(L1)
// = Le0 + f0*(Le1 + f1*L2)
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(L3))
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(Le3 + f3*(L4)))
// = ...
// = Le0 + f0*Le1 + f0*f1*Le2 + f0*f1*f2*Le3 + f0*f1*f2*f3*Le4 + ...
//
// So:
// F = 1
// while (1){
// L += F*Lei
// F *= fi
// }
Vec cl = new_vec(0,0,0); // accumulated color
Vec cf = new_vec(1,1,1); // accumulated reflectance
while (1){
if (!intersect(r, t, id)) return cl; // if miss, return black
const Sphere &obj = SPHERES[id]; // the hit object
Vec x=r.o+r.d*t, n=(x-obj.p).norm(), nl=n.dot(r.d)<0?n:n*-1, f=obj.c;
double p = f.x>f.y && f.x>f.z ? f.x : f.y>f.z ? f.y : f.z; // max refl
cl = cl + cf.mult(obj.e);
if (++depth>5) if (curand_uniform(Xi)<p) f=f*(1/p); else return cl; //R.R.
cf = cf.mult(f);
if (obj.refl == DIFF){ // Ideal DIFFUSE reflection
double r1=2*M_PI*curand_uniform(Xi), r2=curand_uniform(Xi), r2s=sqrt(r2);
Vec w=nl, u=((fabs(w.x)>.1? new_vec(0,1):new_vec(1))%w).norm(), v=w%u;
Vec d = (u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrt(1-r2)).norm();
//return obj.e + f.mult(radiance(Ray(x,d),depth,Xi));
r = new_ray(x,d);
continue;
} else if (obj.refl == SPEC){ // Ideal SPECULAR reflection
//return obj.e + f.mult(radiance(Ray(x,r.d-n*2*n.dot(r.d)),depth,Xi));
r = new_ray(x,r.d-n*2*n.dot(r.d));
continue;
}
Ray reflRay = new_ray(x, r.d-n*2*n.dot(r.d)); // Ideal dielectric REFRACTION
bool into = n.dot(nl)>0; // Ray from outside going in?
double nc=1, nt=1.5, nnt=into?nc/nt:nt/nc, ddn=r.d.dot(nl), cos2t;
if ((cos2t=1-nnt*nnt*(1-ddn*ddn))<0){ // Total internal reflection
//return obj.e + f.mult(radiance(reflRay,depth,Xi));
r = reflRay;
continue;
}
Vec tdir = (r.d*nnt - n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm();
double a=nt-nc, b=nt+nc, R0=a*a/(b*b), c = 1-(into?-ddn:tdir.dot(n));
double Re=R0+(1-R0)*c*c*c*c*c,Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP=Tr/(1-P);
// return obj.e + f.mult(curand_uniform(Xi)<P ?
// radiance(reflRay, depth,Xi)*RP:
// radiance(Ray(x,tdir),depth,Xi)*TP);
if (curand_uniform(Xi)<P){
cf = cf*RP;
r = reflRay;
} else {
cf = cf*TP;
r = new_ray(x,tdir);
}
continue;
}
}
__global__ void calc_pixel(Vec *out, int samps) {
// Calculates a single pixel in the final image
// Returns a color vector that is later written to the final image.
curandState state;
int w=1024, h=768;
Ray cam = new_ray(new_vec(50,52,295.6), new_vec(0,-0.042612,-1).norm()); // cam pos, dir
Vec cx = new_vec(w*.5135/h), cy = (cx%cam.d).norm()*.5135;
int t = blockIdx.x * blockDim.x + threadIdx.x;
int y = t/w;
int x = t%w;
curand_init(t, t, 0, &state);
//skipahead(y*y*y, &state);
if (t < w*h) {
int i = (h-y-1) * w + x;
Vec res = out[i];
for (int sy = 0; sy < 2; sy++) { // 2x2 subpixel rows
for (int sx = 0; sx < 2; sx++) { // 2x2 subpixel cols
Vec r = new_vec();
for (int s = 0; s < samps; s++) {
double r1 = 2*curand_uniform(&state), dx=r1<1 ? sqrt(r1)-1: 1-sqrt(2-r1);
double r2 = 2*curand_uniform(&state), dy=r2<1 ? sqrt(r2)-1: 1-sqrt(2-r2);
Vec d = cx*( ( (sx+.5 + dx)/2 + x)/w - .5) +
cy*( ( (sy+.5 + dy)/2 + y)/h - .5) + cam.d;
d = d.norm();
Vec res = linear_radiance(new_ray(cam.o+d*140,d),0, &state);
r = r + res * (1./samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
out[i] = out[i] + new_vec(clamp(r.x),clamp(r.y),clamp(r.z))*.25;
}
}
}
}
int main(int argc, char *argv[]) {
float BLOCK_SIZE = 512;
timer_start("Starting program."); //@@ start a timer
Sphere *spheres = (Sphere *)malloc(NUM_SPHERES * sizeof(Sphere));
spheres[0] = new_sphere(1e5, new_vec( 1e5+1,40.8,81.6), new_vec(),new_vec(.75,.25,.25),DIFF);//Left
spheres[1] = new_sphere(1e5, new_vec(-1e5+99,40.8,81.6), new_vec(),new_vec(.25,.25,.75),DIFF);//Rght
spheres[2] = new_sphere(1e5, new_vec(50,40.8, 1e5), new_vec(),new_vec(.75,.75,.75),DIFF);//Back
spheres[3] = new_sphere(1e5, new_vec(50,40.8,-1e5+170), new_vec(),new_vec(), DIFF);//Frnt
spheres[4] = new_sphere(1e5, new_vec(50, 1e5, 81.6), new_vec(),new_vec(.75,.75,.75),DIFF);//Botm
spheres[5] = new_sphere(1e5, new_vec(50,-1e5+81.6,81.6), new_vec(),new_vec(.75,.75,.75),DIFF);//Top
spheres[6] = new_sphere(16.5, new_vec(27,16.5,47), new_vec(),new_vec(1,1,1)*.999, SPEC);//Mirr
spheres[7] = new_sphere(16.5, new_vec(73,16.5,78), new_vec(),new_vec(1,1,1)*.999, REFR);//Glas
spheres[8] = new_sphere(600, new_vec(50,681.6-.27,81.6), new_vec(12,12,12), new_vec(), DIFF);//Lite
// Copy the spheres to constant memory
cudaMemcpyToSymbol(SPHERES, spheres, NUM_SPHERES * sizeof(Sphere));
int w=1024, h=768; // # samples
int samps = argc==2 ? atoi(argv[1])/4 : 250;
Vec *host_out = (Vec *)malloc(sizeof(Vec) * w * h);
Vec *device_out;
cudaMalloc((void **) &device_out, sizeof(Vec) * w * h);
printf("This is Chris's 1-D optimization.\n");
printf("Render starting!\nBlock size is %i\n", int(BLOCK_SIZE));
dim3 grid(ceil((w*h)/BLOCK_SIZE), 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
calc_pixel<<<grid, block>>>(device_out, samps);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaMemcpy(host_out, device_out, sizeof(Vec) * w * h, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
cudaFree(device_out);
float milliseconds;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Image rendered in %f milliseconds!\n", milliseconds);
FILE *f = fopen("image.ppm", "w"); // Write image to PPM file.
fprintf(f, "P3\n%d %d\n%d\n", w, h, 255);
for (int i=0; i<w*h; i++)
fprintf(f,"%d %d %d ", toInt(host_out[i].x), toInt(host_out[i].y), toInt(host_out[i].z));
fclose(f);
free(host_out);
free(spheres);
timer_stop();
return 0;
}
|
41b866dd06ce44dbb9d97bc7ae48acd9a32f488e.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <THH/THHAtomics.cuh>
#include "../utils.h"
#define BLOCK_SIZE 512
namespace kaolin {
template<typename T>
struct ScalarTypeToVec3Type { using type = float3; };
template <> struct ScalarTypeToVec3Type<float> { using type = float3; };
template <> struct ScalarTypeToVec3Type<double> { using type = double3; };
template<typename scalar_t, typename vector_t>
__device__ __forceinline__ vector_t make_vectorize(scalar_t x, scalar_t y, scalar_t z) {
vector_t output = {x, y, z};
return output;
}
template <>
__device__ __forceinline__ float3 make_vectorize<float, float3>(float x, float y, float z) {
return make_float3(x, y, z);
}
template <>
__device__ __forceinline__ double3 make_vectorize<double, double3>(double x, double y, double z) {
return make_double3(x, y, z);
}
template <typename scalar_t, typename vector_t>
__device__ scalar_t dot(vector_t a, vector_t b)
{
return a.x * b.x + a.y * b.y + a.z * b.z ;
}
template<typename scalar_t, typename vector_t>
__device__ scalar_t dot2(vector_t v)
{
return dot<scalar_t, vector_t>(v, v);
}
template<typename scalar_t>
__device__ scalar_t clamp(scalar_t x, scalar_t a, scalar_t b)
{
return max(a, min(b, x));
}
template<typename scalar_t, typename vector_t>
__device__ vector_t cross(vector_t a, vector_t b)
{
return make_vectorize<scalar_t, vector_t>(a.y * b.z - a.z * b.y,
a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x);
}
// Sign of a; note that zero maps to -1, so the result is never 0.
template<typename scalar_t>
__device__ int sign(scalar_t a)
{
    return (a <= 0) ? -1 : 1;
}
// Scale every component of vector a by scalar b.
template<typename scalar_t, typename vector_t>
__device__ vector_t operator* (vector_t a, scalar_t b)
{
    scalar_t sx = a.x * b;
    scalar_t sy = a.y * b;
    scalar_t sz = a.z * b;
    return make_vectorize<scalar_t, vector_t>(sx, sy, sz);
}
// Add scalar b to every component of vector a.
template<typename scalar_t, typename vector_t>
__device__ vector_t operator+ (vector_t a, scalar_t b)
{
    scalar_t sx = a.x + b;
    scalar_t sy = a.y + b;
    scalar_t sz = a.z + b;
    return make_vectorize<scalar_t, vector_t>(sx, sy, sz);
}
// Divide every component of vector a by scalar b.
template<typename scalar_t, typename vector_t>
__device__ vector_t operator/ (vector_t a, scalar_t b)
{
    scalar_t sx = a.x / b;
    scalar_t sy = a.y / b;
    scalar_t sz = a.z / b;
    return make_vectorize<scalar_t, vector_t>(sx, sy, sz);
}
// Component-wise vector addition a + b.
template<typename scalar_t, typename vector_t>
__device__ vector_t add(vector_t a, vector_t b)
{
    scalar_t sx = a.x + b.x;
    scalar_t sy = a.y + b.y;
    scalar_t sz = a.z + b.z;
    return make_vectorize<scalar_t, vector_t>(sx, sy, sz);
}
// Component-wise vector subtraction a - b. (Spelling kept for callers.)
template<typename scalar_t, typename vector_t>
__device__ vector_t substract(vector_t a, vector_t b)
{
    scalar_t dx = a.x - b.x;
    scalar_t dy = a.y - b.y;
    scalar_t dz = a.z - b.z;
    return make_vectorize<scalar_t, vector_t>(dx, dy, dz);
}
// Sign of the scalar triple product (a x b) . c. Used to decide on which
// side of a triangle edge a point's projection lies.
template<typename scalar_t, typename vector_t>
__device__ int signage(vector_t a, vector_t b, vector_t c)
{
    vector_t ab = cross<scalar_t, vector_t>(a, b);
    return sign<scalar_t>(dot<scalar_t, vector_t>(ab, c));
}
// Squared distance from point b (expressed relative to the edge origin) to
// the segment spanned by edge vector a: project b onto a, clamp the
// projection parameter into [0, 1], then measure the squared residual.
template<typename scalar_t, typename vector_t>
__device__ scalar_t edge_distance(vector_t a, vector_t b)
{
    scalar_t t = clamp<scalar_t>(dot<scalar_t, vector_t>(a, b) / dot2<scalar_t, vector_t>(a), 0.0, 1.0);
    vector_t residual = substract<scalar_t, vector_t>(a * t, b);
    return dot2<scalar_t, vector_t>(residual);
}
// Squared distance from a point (offset b from a point on the plane) to the
// plane with unnormalized normal a: (a . b)^2 / |a|^2.
template<typename scalar_t, typename vector_t>
__device__ scalar_t plane_distance(vector_t a, vector_t b)
{
    scalar_t proj = dot<scalar_t, vector_t>(a, b);
    return proj * proj / dot2<scalar_t, vector_t>(a);
}
// Backward pass for the "closest to an edge" case of the squared
// point-to-triangle distance. Forward quantity being differentiated:
//   dist = | vab * clamp((vab . pb) / |vab|^2, 0, 1) - pb |^2
// where vab is the edge vector and pb the query point relative to the edge
// origin; `grad` is d(loss)/d(dist).
// Writes the query-point gradient to grad_input_p at `index` (exclusive per
// point) and accumulates the two endpoint gradients into grad_input_va /
// grad_input_vb at `mesh_idx` with atomicAdd, because several points may
// select the same face.
// Fix vs. previous revision: the dead forward value `h = dot2(i)` (never
// used by the gradient) is no longer computed.
template<typename scalar_t, typename vector_t>
__device__ void compute_edge_backward(vector_t vab, vector_t pb, scalar_t* grad_input_va, scalar_t* grad_input_vb, scalar_t* grad_input_p, int64_t index, int64_t mesh_idx, scalar_t grad)
{
    // Recompute the forward-pass intermediates (cheaper than storing them).
    scalar_t l = dot<scalar_t, vector_t>(vab, pb);            // vab . pb
    scalar_t m = dot2<scalar_t, vector_t>(vab);               // |vab|^2
    scalar_t k = l / m;                                       // raw projection parameter
    scalar_t j = clamp<scalar_t>(k, 0.0, 1.0);                // clamped parameter
    vector_t i = substract<scalar_t, vector_t>(vab * j, pb);  // residual vector
    // Reverse-mode sweep; *_bar denotes d(loss)/d(*).
    vector_t i_bar = make_vectorize<scalar_t, vector_t>(2 * i.x, 2 * i.y, 2 * i.z) * grad;
    scalar_t j_bar = dot<scalar_t, vector_t>(i_bar, vab);
    scalar_t dj_dk = (k > 0 && k < 1) ? 1 : 0;  // clamp is flat outside (0, 1)
    scalar_t k_bar = j_bar * dj_dk;
    scalar_t m_bar = k_bar * (- l / (m * m));
    scalar_t l_bar = k_bar * (1 / m);
    // Gradient w.r.t. pb (equals the gradient w.r.t. the query point p).
    vector_t dl_dpb = vab;
    vector_t di_dpb = make_vectorize<scalar_t, vector_t>(-i_bar.x, -i_bar.y, -i_bar.z);
    vector_t pb_bar = add<scalar_t, vector_t>(dl_dpb * l_bar, di_dpb);
    vector_t p_bar = pb_bar;
    // Gradient w.r.t. the edge vector vab.
    vector_t dm_dvab = make_vectorize<scalar_t, vector_t>(vab.x, vab.y, vab.z) * 2;
    vector_t dl_dvab = make_vectorize<scalar_t, vector_t>(pb.x, pb.y, pb.z);
    vector_t di_dvab = make_vectorize<scalar_t, vector_t>(i_bar.x, i_bar.y, i_bar.z) * j;
    vector_t vab_bar = add<scalar_t, vector_t>(add<scalar_t, vector_t>(dm_dvab * m_bar, dl_dvab * l_bar), di_dvab);
    vector_t va_bar = vab_bar;
    // vb enters both vab = va - vb and pb = p - vb, hence the combined term.
    vector_t vb_bar = make_vectorize<scalar_t, vector_t>(-vab_bar.x - pb_bar.x, -vab_bar.y - pb_bar.y, -vab_bar.z - pb_bar.z);
    // Per-point gradient: exclusive write, no atomics needed.
    grad_input_p[index * 3] = p_bar.x;
    grad_input_p[index * 3 + 1] = p_bar.y;
    grad_input_p[index * 3 + 2] = p_bar.z;
    // Per-vertex gradients: shared across points, so accumulate atomically.
    atomicAdd(&(grad_input_va[mesh_idx * 3]), va_bar.x);
    atomicAdd(&(grad_input_va[mesh_idx * 3 + 1]), va_bar.y);
    atomicAdd(&(grad_input_va[mesh_idx * 3 + 2]), va_bar.z);
    atomicAdd(&(grad_input_vb[mesh_idx * 3]), vb_bar.x);
    atomicAdd(&(grad_input_vb[mesh_idx * 3 + 1]), vb_bar.y);
    atomicAdd(&(grad_input_vb[mesh_idx * 3 + 2]), vb_bar.z);
}
// Nearest-triangle search for every query point (unbatched).
//   points      : (n, 3) flattened xyz query points.
//   verts_1/2/3 : (m, 3) flattened coordinates of each face's 3 vertices.
//   result      : per point, squared distance to its closest face.
//   result_i    : per point, index of that face.
//   result_t    : per point, distance type — 0/1/2 = closest to edge
//                 v2-v1 / v3-v2 / v1-v3, 3 = closest to the face interior.
// Faces are staged through shared memory in tiles of `batch`; points are
// distributed over threadIdx.x + blockIdx.y * blockDim.x and strided by
// blockDim.x * gridDim.y (the grid's x dimension is not used for indexing).
template<typename scalar_t, typename vector_t>
__global__ void UnbatchedTriangleDistanceKernel(
const scalar_t* points,
const scalar_t* verts_1,
const scalar_t* verts_2,
const scalar_t* verts_3,
int n,
int m,
scalar_t* result,
int64_t* result_i,
int* result_t)
{
const int batch = 512;
// Shared staging buffers for one tile of faces (batch faces x 3 coords).
__shared__ scalar_t buf_1[batch * 3];
__shared__ scalar_t buf_2[batch * 3];
__shared__ scalar_t buf_3[batch * 3];
for (int k2 = 0; k2 < m; k2 += batch){
int end_k = min(m, k2 + batch) - k2; // number of faces in this tile
// Cooperative load of the tile into shared memory.
for (int j = threadIdx.x; j < end_k * 3;j += blockDim.x){
buf_1[j] = verts_1[k2 * 3 + j];
buf_2[j] = verts_2[k2 * 3 + j];
buf_3[j] = verts_3[k2 * 3 + j];
}
__syncthreads(); // tile fully staged before any thread reads it
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y){ // for points in a batch
vector_t p = make_vectorize<scalar_t, vector_t>(points[j * 3 + 0], points[j * 3 + 1], points[j * 3 + 2]);
int64_t best_i = 0;
int best_t = 0;
scalar_t best = 10000; // overwritten at k == 0, so not a distance bound
for (int k = 0; k < end_k; k++){
vector_t v1 = make_vectorize<scalar_t, vector_t>(buf_1[k * 3 + 0], buf_1[k * 3 + 1], buf_1[k * 3 + 2]);
vector_t v2 = make_vectorize<scalar_t, vector_t>(buf_2[k * 3 + 0], buf_2[k * 3 + 1], buf_2[k * 3 + 2]);
vector_t v3 = make_vectorize<scalar_t, vector_t>(buf_3[k * 3 + 0], buf_3[k * 3 + 1], buf_3[k * 3 + 2]);
vector_t v21 = substract<scalar_t, vector_t>(v2, v1); // edge vector v2 - v1
vector_t v32 = substract<scalar_t, vector_t>(v3, v2); // edge vector v3 - v2
vector_t v13 = substract<scalar_t, vector_t>(v1, v3); // edge vector v1 - v3
vector_t p1 = substract<scalar_t, vector_t>(p, v1); // point relative to vertex 1
vector_t p2 = substract<scalar_t, vector_t>(p, v2); // point relative to vertex 2
vector_t p3 = substract<scalar_t, vector_t>(p, v3); // point relative to vertex 3
vector_t normal = cross<scalar_t, vector_t>(v21, v13); // (unnormalized) face normal
// Each signage() term is +-1; the sum is >= 2 exactly when the point's
// projection falls inside the triangle.
scalar_t sign_cond = signage<scalar_t, vector_t>(v21, normal, p1) + signage<scalar_t, vector_t>(v32, normal, p2) + signage<scalar_t, vector_t>(v13, normal, p3);
scalar_t dist = 100;
int type = 0;
if (sign_cond < 2.0) { // projection outside the face: nearest feature is one of the three edges
scalar_t dist1 = edge_distance<scalar_t, vector_t>(v21, p1);
scalar_t dist2 = edge_distance<scalar_t, vector_t>(v32, p2);
scalar_t dist3 = edge_distance<scalar_t, vector_t>(v13, p3);
if (dist1 <= dist2 && dist1 <= dist3){
dist = dist1;
type = 0;
} else if (dist2 <= dist1 && dist2 <= dist3){
dist = dist2;
type = 1;
} else {
dist = dist3;
type = 2;
}
} else { // projection inside the face: distance to the supporting plane
dist = plane_distance<scalar_t, vector_t>(normal, p1);
type = 3;
}
if (k == 0 || dist < best){
best = dist;
best_i = k + k2;
best_t = type;
}
}
// Merge this tile's best candidate into the global per-point result.
if (k2 == 0 || result[j] > best){
result[j] = best;
result_i[j] = best_i;
result_t[j] = best_t;
}
}
__syncthreads(); // all reads done before the next tile overwrites the buffers
}
}
// Backward pass of the unbatched point-to-triangle distance.
// Re-derives, per point, the configuration recorded by the forward pass
// (closest face idx[point_id], feature class dist_type[point_id]) and
// chains grad_output through the corresponding squared-distance formula.
//   grad_input_p      : (n, 3) gradient w.r.t. the query points
//                       (exclusive per-point writes).
//   grad_input_v1/2/3 : (m, 3) gradients w.r.t. the vertex tensors,
//                       accumulated with atomicAdd since many points can
//                       select the same face.
// No block-level synchronization is needed: the kernel uses no shared
// memory.
template<typename scalar_t, typename vector_t>
__global__ void UnbatchedTriangleDistanceBackwardKernel(
    const scalar_t* grad_output,
    const scalar_t* points,
    const scalar_t* verts_1,
    const scalar_t* verts_2,
    const scalar_t* verts_3,
    const int n, // num of points
    const int m, // num of faces
    const int64_t* idx,
    const int* dist_type,
    scalar_t* grad_input_p,
    scalar_t* grad_input_v1,
    scalar_t* grad_input_v2,
    scalar_t* grad_input_v3)
{
    // Grid-stride loop. The stride must be blockDim.x * gridDim.x: striding
    // by blockDim.x alone (as the previous revision did) makes every block
    // walk over (almost) the entire point range, so the atomically
    // accumulated vertex gradients were counted once per block instead of
    // once per point whenever more than one block was launched.
    for (int point_id = threadIdx.x + blockIdx.x * blockDim.x; point_id < n; point_id += blockDim.x * gridDim.x) {
        int type = dist_type[point_id];
        int64_t mesh_idx = idx[point_id];
        vector_t p = make_vectorize<scalar_t, vector_t>(points[point_id * 3], points[point_id * 3 + 1], points[point_id * 3 + 2]);
        vector_t v1 = make_vectorize<scalar_t, vector_t>(verts_1[mesh_idx * 3], verts_1[mesh_idx * 3 + 1], verts_1[mesh_idx * 3 + 2]);
        vector_t v2 = make_vectorize<scalar_t, vector_t>(verts_2[mesh_idx * 3], verts_2[mesh_idx * 3 + 1], verts_2[mesh_idx * 3 + 2]);
        vector_t v3 = make_vectorize<scalar_t, vector_t>(verts_3[mesh_idx * 3], verts_3[mesh_idx * 3 + 1], verts_3[mesh_idx * 3 + 2]);
        vector_t v21 = substract<scalar_t, vector_t>(v2, v1); // edge v2 - v1
        vector_t v32 = substract<scalar_t, vector_t>(v3, v2); // edge v3 - v2
        vector_t v13 = substract<scalar_t, vector_t>(v1, v3); // edge v1 - v3
        vector_t p1 = substract<scalar_t, vector_t>(p, v1); // point relative to v1
        vector_t p2 = substract<scalar_t, vector_t>(p, v2); // point relative to v2
        vector_t p3 = substract<scalar_t, vector_t>(p, v3); // point relative to v3
        scalar_t grad = grad_output[point_id];
        if (type == 0) { // closest to edge v21
            compute_edge_backward(v21, p1, grad_input_v2, grad_input_v1, grad_input_p, point_id, mesh_idx, grad);
        } else if (type == 1) { // closest to edge v32
            compute_edge_backward(v32, p2, grad_input_v3, grad_input_v2, grad_input_p, point_id, mesh_idx, grad);
        } else if (type == 2) { // closest to edge v13
            compute_edge_backward(v13, p3, grad_input_v1, grad_input_v3, grad_input_p, point_id, mesh_idx, grad);
        } else if (type == 3) { // closest to the face interior
            // Forward: dist = (i . p1)^2 / |i|^2 with i = v21 x v13.
            vector_t i = cross<scalar_t, vector_t>(v21, v13);
            scalar_t j = dot2<scalar_t, vector_t>(i);
            scalar_t k = dot<scalar_t,vector_t>(i, p1);
            // Reverse-mode sweep; *_bar denotes d(loss)/d(*). The forward
            // distance value itself is not needed for the gradient.
            scalar_t k_bar = ((2 * k) / j) * grad;
            scalar_t j_bar = - ((k / j) * (k / j)) * grad;
            vector_t dk_di = p1;
            vector_t dj_di = make_vectorize<scalar_t, vector_t>(2 * i.x, 2 * i.y, 2 * i.z);
            vector_t i_bar = add<scalar_t, vector_t>(dj_di * j_bar, dk_di * k_bar);
            vector_t dk_dp1 = make_vectorize<scalar_t, vector_t>(i.x, i.y, i.z);
            vector_t p1_bar = dk_dp1 * k_bar;
            // Columns of the cross-product Jacobian w.r.t. v21.
            vector_t di_dv21_x = make_vectorize<scalar_t, vector_t>(0, -v13.z, v13.y);
            vector_t di_dv21_y = make_vectorize<scalar_t, vector_t>(v13.z, 0, -v13.x);
            vector_t di_dv21_z = make_vectorize<scalar_t, vector_t>(-v13.y, v13.x, 0);
            scalar_t v21_bar_x = dot<scalar_t, vector_t>(i_bar, di_dv21_x);
            scalar_t v21_bar_y = dot<scalar_t, vector_t>(i_bar, di_dv21_y);
            scalar_t v21_bar_z = dot<scalar_t, vector_t>(i_bar, di_dv21_z);
            vector_t v21_bar = make_vectorize<scalar_t, vector_t>(v21_bar_x, v21_bar_y, v21_bar_z);
            // Columns of the cross-product Jacobian w.r.t. v13.
            vector_t di_dv13_x = make_vectorize<scalar_t, vector_t>(0, v21.z, -v21.y);
            vector_t di_dv13_y = make_vectorize<scalar_t, vector_t>(-v21.z, 0, v21.x);
            vector_t di_dv13_z = make_vectorize<scalar_t, vector_t>(v21.y, -v21.x, 0);
            scalar_t v13_bar_x = dot<scalar_t, vector_t>(i_bar, di_dv13_x);
            scalar_t v13_bar_y = dot<scalar_t, vector_t>(i_bar, di_dv13_y);
            scalar_t v13_bar_z = dot<scalar_t, vector_t>(i_bar, di_dv13_z);
            vector_t v13_bar = make_vectorize<scalar_t, vector_t>(v13_bar_x, v13_bar_y, v13_bar_z);
            // Chain into the vertices: v21 = v2 - v1, v13 = v1 - v3, p1 = p - v1.
            vector_t v1_bar_v21 = make_vectorize<scalar_t, vector_t>(-v21_bar.x, -v21_bar.y, -v21_bar.z);
            vector_t v1_bar_v13 = make_vectorize<scalar_t, vector_t>(v13_bar.x, v13_bar.y, v13_bar.z);
            vector_t v1_bar_p1 = make_vectorize<scalar_t, vector_t>(-p1_bar.x, -p1_bar.y, -p1_bar.z);
            vector_t v1_bar = add<scalar_t, vector_t>(add<scalar_t, vector_t>(v1_bar_v13, v1_bar_v21), v1_bar_p1);
            vector_t v2_bar = make_vectorize<scalar_t, vector_t>(v21_bar.x, v21_bar.y, v21_bar.z);
            vector_t v3_bar = make_vectorize<scalar_t, vector_t>(-v13_bar.x, -v13_bar.y, -v13_bar.z);
            vector_t p_bar = p1_bar;
            grad_input_p[point_id * 3] = p_bar.x;
            grad_input_p[point_id * 3 + 1] = p_bar.y;
            grad_input_p[point_id * 3 + 2] = p_bar.z;
            atomicAdd(&(grad_input_v1[mesh_idx * 3]), v1_bar.x);
            atomicAdd(&(grad_input_v1[mesh_idx * 3 + 1]), v1_bar.y);
            atomicAdd(&(grad_input_v1[mesh_idx * 3 + 2]), v1_bar.z);
            atomicAdd(&(grad_input_v2[mesh_idx * 3]), v2_bar.x);
            atomicAdd(&(grad_input_v2[mesh_idx * 3 + 1]), v2_bar.y);
            atomicAdd(&(grad_input_v2[mesh_idx * 3 + 2]), v2_bar.z);
            atomicAdd(&(grad_input_v3[mesh_idx * 3]), v3_bar.x);
            atomicAdd(&(grad_input_v3[mesh_idx * 3 + 1]), v3_bar.y);
            atomicAdd(&(grad_input_v3[mesh_idx * 3 + 2]), v3_bar.z);
        }
        // NOTE: the previous revision called __syncthreads() here; with no
        // shared memory it was unnecessary, and inside this divergent loop
        // (threads exit at different trip counts) it was undefined behavior.
    }
}
// Host-side launcher for the forward nearest-triangle kernel (HIP).
// Dispatches on the scalar type of `points` and launches a fixed 32x16
// grid of 512-thread blocks; points are strided over the grid's y
// dimension inside the kernel.
void unbatched_triangle_distance_forward_cuda_kernel_launcher(
    const at::Tensor points,
    const at::Tensor verts_1,
    const at::Tensor verts_2,
    const at::Tensor verts_3,
    const at::Tensor dist1,
    const at::Tensor idx1,
    const at::Tensor type1) {
  DISPATCH_NUM_TYPES(points.scalar_type(), scalar_t, "unbatched_triangle_distance", [&] {
    using vector_t = ScalarTypeToVec3Type<scalar_t>::type;
    const dim3 grid(32, 16, 1);
    const dim3 block(512);
    hipLaunchKernelGGL((UnbatchedTriangleDistanceKernel<scalar_t, vector_t>), grid, block, 0, 0,
        points.data_ptr<scalar_t>(),
        verts_1.data_ptr<scalar_t>(),
        verts_2.data_ptr<scalar_t>(),
        verts_3.data_ptr<scalar_t>(),
        points.size(0),
        verts_1.size(0),
        dist1.data_ptr<scalar_t>(),
        idx1.data_ptr<int64_t>(),
        type1.data_ptr<int>());
  });
}
// Host-side launcher for the backward kernel (HIP): one thread per point,
// BLOCK_SIZE threads per block, enough blocks to cover all points.
void unbatched_triangle_distance_backward_cuda_kernel_launcher(
    const at::Tensor grad_output,
    const at::Tensor points,
    const at::Tensor verts_1,
    const at::Tensor verts_2,
    const at::Tensor verts_3,
    const at::Tensor idx,
    const at::Tensor dist_type,
    const at::Tensor grad_input_p,
    const at::Tensor grad_input_v1,
    const at::Tensor grad_input_v2,
    const at::Tensor grad_input_v3) {
  DISPATCH_NUM_TYPES(points.scalar_type(), scalar_t, "unbatched_triangle_distance", [&] {
    using vector_t = ScalarTypeToVec3Type<scalar_t>::type;
    const int num_points = points.size(0);
    const int num_blocks = (num_points + BLOCK_SIZE - 1) / BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL((UnbatchedTriangleDistanceBackwardKernel<scalar_t, vector_t>),
        dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0,
        grad_output.data_ptr<scalar_t>(),
        points.data_ptr<scalar_t>(),
        verts_1.data_ptr<scalar_t>(),
        verts_2.data_ptr<scalar_t>(),
        verts_3.data_ptr<scalar_t>(),
        num_points,
        verts_1.size(0),
        idx.data_ptr<int64_t>(),
        dist_type.data_ptr<int>(),
        grad_input_p.data_ptr<scalar_t>(),
        grad_input_v1.data_ptr<scalar_t>(),
        grad_input_v2.data_ptr<scalar_t>(),
        grad_input_v3.data_ptr<scalar_t>());
  });
}
} // namespace kaolin
| 41b866dd06ce44dbb9d97bc7ae48acd9a32f488e.cu | // Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <THC/THCAtomics.cuh>
#include "../utils.h"
#define BLOCK_SIZE 512
namespace kaolin {
// Maps a scalar element type to the matching 3-component vector type.
// NOTE(review): the unspecialized template defaults to float3 for ANY T.
template<typename T>
struct ScalarTypeToVec3Type { using type = float3; };
template <> struct ScalarTypeToVec3Type<float> { using type = float3; };
template <> struct ScalarTypeToVec3Type<double> { using type = double3; };
// Builds a vector_t {x, y, z} from three scalars; the float3/double3
// specializations defer to the native make_float3/make_double3 helpers.
template<typename scalar_t, typename vector_t>
__device__ __forceinline__ vector_t make_vectorize(scalar_t x, scalar_t y, scalar_t z) {
vector_t output = {x, y, z};
return output;
}
template <>
__device__ __forceinline__ float3 make_vectorize<float, float3>(float x, float y, float z) {
return make_float3(x, y, z);
}
template <>
__device__ __forceinline__ double3 make_vectorize<double, double3>(double x, double y, double z) {
return make_double3(x, y, z);
}
// 3D dot product a . b.
template <typename scalar_t, typename vector_t>
__device__ scalar_t dot(vector_t a, vector_t b)
{
return a.x * b.x + a.y * b.y + a.z * b.z ;
}
// Squared Euclidean norm |v|^2 = v . v.
template<typename scalar_t, typename vector_t>
__device__ scalar_t dot2(vector_t v)
{
return dot<scalar_t, vector_t>(v, v);
}
// Clamp x into the closed interval [a, b] (assumes a <= b).
template<typename scalar_t>
__device__ scalar_t clamp(scalar_t x, scalar_t a, scalar_t b)
{
return max(a, min(b, x));
}
// Standard 3D cross product a x b.
template<typename scalar_t, typename vector_t>
__device__ vector_t cross(vector_t a, vector_t b)
{
return make_vectorize<scalar_t, vector_t>(a.y * b.z - a.z * b.y,
a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x);
}
// Sign of a; note that zero maps to -1 (never returns 0).
template<typename scalar_t>
__device__ int sign(scalar_t a)
{
if (a <= 0) {return -1;}
else {return 1;}
}
// Scale every component of vector a by scalar b.
template<typename scalar_t, typename vector_t>
__device__ vector_t operator* (vector_t a, scalar_t b)
{
return make_vectorize<scalar_t, vector_t>(a.x * b, a.y * b, a.z * b);
}
// Add scalar b to every component of vector a.
template<typename scalar_t, typename vector_t>
__device__ vector_t operator+ (vector_t a, scalar_t b)
{
return make_vectorize<scalar_t, vector_t>(a.x + b, a.y + b, a.z + b);
}
// Divide every component of vector a by scalar b.
template<typename scalar_t, typename vector_t>
__device__ vector_t operator/ (vector_t a, scalar_t b)
{
return make_vectorize<scalar_t, vector_t>(a.x / b, a.y / b, a.z / b);
}
// Component-wise vector addition a + b.
template<typename scalar_t, typename vector_t>
__device__ vector_t add(vector_t a, vector_t b)
{
return make_vectorize<scalar_t, vector_t>(a.x + b.x, a.y + b.y, a.z + b.z);
}
// Component-wise vector subtraction a - b. (Spelling kept as in callers.)
template<typename scalar_t, typename vector_t>
__device__ vector_t substract(vector_t a, vector_t b)
{
return make_vectorize<scalar_t, vector_t>(a.x - b.x, a.y - b.y, a.z - b.z);
}
// Sign of the scalar triple product (a x b) . c; used to decide on which
// side of a triangle edge a point's projection lies.
template<typename scalar_t, typename vector_t>
__device__ int signage(vector_t a, vector_t b, vector_t c)
{
return sign<scalar_t>(dot<scalar_t, vector_t>(cross<scalar_t, vector_t>(a, b), c));
}
// Squared distance from point b (relative to the edge origin) to the
// segment spanned by edge vector a: project, clamp to [0, 1], measure the
// squared residual.
template<typename scalar_t, typename vector_t>
__device__ scalar_t edge_distance(vector_t a, vector_t b)
{
return dot2<scalar_t, vector_t>(substract<scalar_t, vector_t>(a * clamp<scalar_t>(dot<scalar_t, vector_t>(a, b) / dot2<scalar_t, vector_t> (a), 0.0, 1.0), b));
}
// Squared distance from a point (offset b) to the plane with unnormalized
// normal a: (a . b)^2 / |a|^2.
template<typename scalar_t, typename vector_t>
__device__ scalar_t plane_distance(vector_t a, vector_t b)
{
return dot<scalar_t, vector_t>(a, b) * dot<scalar_t, vector_t>(a, b) / dot2<scalar_t, vector_t>(a);
}
// Backward pass for the "closest to an edge" case of the squared
// point-to-triangle distance. Forward quantity being differentiated:
//   dist = | vab * clamp((vab . pb) / |vab|^2, 0, 1) - pb |^2
// where vab is the edge vector and pb the query point relative to the edge
// origin; `grad` is d(loss)/d(dist).
// Writes the query-point gradient to grad_input_p at `index` (exclusive per
// point) and accumulates the endpoint gradients into grad_input_va /
// grad_input_vb at `mesh_idx` with atomicAdd, since several points may
// select the same face.
// Fix vs. previous revision: the dead forward value `h = dot2(i)` (never
// used by the gradient) is no longer computed.
template<typename scalar_t, typename vector_t>
__device__ void compute_edge_backward(vector_t vab, vector_t pb, scalar_t* grad_input_va, scalar_t* grad_input_vb, scalar_t* grad_input_p, int64_t index, int64_t mesh_idx, scalar_t grad)
{
    // Recompute the forward-pass intermediates (cheaper than storing them).
    scalar_t l = dot<scalar_t, vector_t>(vab, pb);            // vab . pb
    scalar_t m = dot2<scalar_t, vector_t>(vab);               // |vab|^2
    scalar_t k = l / m;                                       // raw projection parameter
    scalar_t j = clamp<scalar_t>(k, 0.0, 1.0);                // clamped parameter
    vector_t i = substract<scalar_t, vector_t>(vab * j, pb);  // residual vector
    // Reverse-mode sweep; *_bar denotes d(loss)/d(*).
    vector_t i_bar = make_vectorize<scalar_t, vector_t>(2 * i.x, 2 * i.y, 2 * i.z) * grad;
    scalar_t j_bar = dot<scalar_t, vector_t>(i_bar, vab);
    scalar_t dj_dk = (k > 0 && k < 1) ? 1 : 0;  // clamp is flat outside (0, 1)
    scalar_t k_bar = j_bar * dj_dk;
    scalar_t m_bar = k_bar * (- l / (m * m));
    scalar_t l_bar = k_bar * (1 / m);
    // Gradient w.r.t. pb (equals the gradient w.r.t. the query point p).
    vector_t dl_dpb = vab;
    vector_t di_dpb = make_vectorize<scalar_t, vector_t>(-i_bar.x, -i_bar.y, -i_bar.z);
    vector_t pb_bar = add<scalar_t, vector_t>(dl_dpb * l_bar, di_dpb);
    vector_t p_bar = pb_bar;
    // Gradient w.r.t. the edge vector vab.
    vector_t dm_dvab = make_vectorize<scalar_t, vector_t>(vab.x, vab.y, vab.z) * 2;
    vector_t dl_dvab = make_vectorize<scalar_t, vector_t>(pb.x, pb.y, pb.z);
    vector_t di_dvab = make_vectorize<scalar_t, vector_t>(i_bar.x, i_bar.y, i_bar.z) * j;
    vector_t vab_bar = add<scalar_t, vector_t>(add<scalar_t, vector_t>(dm_dvab * m_bar, dl_dvab * l_bar), di_dvab);
    vector_t va_bar = vab_bar;
    // vb enters both vab = va - vb and pb = p - vb, hence the combined term.
    vector_t vb_bar = make_vectorize<scalar_t, vector_t>(-vab_bar.x - pb_bar.x, -vab_bar.y - pb_bar.y, -vab_bar.z - pb_bar.z);
    // Per-point gradient: exclusive write, no atomics needed.
    grad_input_p[index * 3] = p_bar.x;
    grad_input_p[index * 3 + 1] = p_bar.y;
    grad_input_p[index * 3 + 2] = p_bar.z;
    // Per-vertex gradients: shared across points, so accumulate atomically.
    atomicAdd(&(grad_input_va[mesh_idx * 3]), va_bar.x);
    atomicAdd(&(grad_input_va[mesh_idx * 3 + 1]), va_bar.y);
    atomicAdd(&(grad_input_va[mesh_idx * 3 + 2]), va_bar.z);
    atomicAdd(&(grad_input_vb[mesh_idx * 3]), vb_bar.x);
    atomicAdd(&(grad_input_vb[mesh_idx * 3 + 1]), vb_bar.y);
    atomicAdd(&(grad_input_vb[mesh_idx * 3 + 2]), vb_bar.z);
}
// Nearest-triangle search for every query point (unbatched).
//   points      : (n, 3) flattened xyz query points.
//   verts_1/2/3 : (m, 3) flattened coordinates of each face's 3 vertices.
//   result      : per point, squared distance to its closest face.
//   result_i    : per point, index of that face.
//   result_t    : per point, distance type — 0/1/2 = closest to edge
//                 v2-v1 / v3-v2 / v1-v3, 3 = closest to the face interior.
// Faces are staged through shared memory in tiles of `batch`; points are
// distributed over threadIdx.x + blockIdx.y * blockDim.x and strided by
// blockDim.x * gridDim.y (the grid's x dimension is not used for indexing).
template<typename scalar_t, typename vector_t>
__global__ void UnbatchedTriangleDistanceKernel(
const scalar_t* points,
const scalar_t* verts_1,
const scalar_t* verts_2,
const scalar_t* verts_3,
int n,
int m,
scalar_t* result,
int64_t* result_i,
int* result_t)
{
const int batch = 512;
// Shared staging buffers for one tile of faces (batch faces x 3 coords).
__shared__ scalar_t buf_1[batch * 3];
__shared__ scalar_t buf_2[batch * 3];
__shared__ scalar_t buf_3[batch * 3];
for (int k2 = 0; k2 < m; k2 += batch){
int end_k = min(m, k2 + batch) - k2; // number of faces in this tile
// Cooperative load of the tile into shared memory.
for (int j = threadIdx.x; j < end_k * 3;j += blockDim.x){
buf_1[j] = verts_1[k2 * 3 + j];
buf_2[j] = verts_2[k2 * 3 + j];
buf_3[j] = verts_3[k2 * 3 + j];
}
__syncthreads(); // tile fully staged before any thread reads it
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y){ // for points in a batch
vector_t p = make_vectorize<scalar_t, vector_t>(points[j * 3 + 0], points[j * 3 + 1], points[j * 3 + 2]);
int64_t best_i = 0;
int best_t = 0;
scalar_t best = 10000; // overwritten at k == 0, so not a distance bound
for (int k = 0; k < end_k; k++){
vector_t v1 = make_vectorize<scalar_t, vector_t>(buf_1[k * 3 + 0], buf_1[k * 3 + 1], buf_1[k * 3 + 2]);
vector_t v2 = make_vectorize<scalar_t, vector_t>(buf_2[k * 3 + 0], buf_2[k * 3 + 1], buf_2[k * 3 + 2]);
vector_t v3 = make_vectorize<scalar_t, vector_t>(buf_3[k * 3 + 0], buf_3[k * 3 + 1], buf_3[k * 3 + 2]);
vector_t v21 = substract<scalar_t, vector_t>(v2, v1); // edge vector v2 - v1
vector_t v32 = substract<scalar_t, vector_t>(v3, v2); // edge vector v3 - v2
vector_t v13 = substract<scalar_t, vector_t>(v1, v3); // edge vector v1 - v3
vector_t p1 = substract<scalar_t, vector_t>(p, v1); // point relative to vertex 1
vector_t p2 = substract<scalar_t, vector_t>(p, v2); // point relative to vertex 2
vector_t p3 = substract<scalar_t, vector_t>(p, v3); // point relative to vertex 3
vector_t normal = cross<scalar_t, vector_t>(v21, v13); // (unnormalized) face normal
// Each signage() term is +-1; the sum is >= 2 exactly when the point's
// projection falls inside the triangle.
scalar_t sign_cond = signage<scalar_t, vector_t>(v21, normal, p1) + signage<scalar_t, vector_t>(v32, normal, p2) + signage<scalar_t, vector_t>(v13, normal, p3);
scalar_t dist = 100;
int type = 0;
if (sign_cond < 2.0) { // projection outside the face: nearest feature is one of the three edges
scalar_t dist1 = edge_distance<scalar_t, vector_t>(v21, p1);
scalar_t dist2 = edge_distance<scalar_t, vector_t>(v32, p2);
scalar_t dist3 = edge_distance<scalar_t, vector_t>(v13, p3);
if (dist1 <= dist2 && dist1 <= dist3){
dist = dist1;
type = 0;
} else if (dist2 <= dist1 && dist2 <= dist3){
dist = dist2;
type = 1;
} else {
dist = dist3;
type = 2;
}
} else { // projection inside the face: distance to the supporting plane
dist = plane_distance<scalar_t, vector_t>(normal, p1);
type = 3;
}
if (k == 0 || dist < best){
best = dist;
best_i = k + k2;
best_t = type;
}
}
// Merge this tile's best candidate into the global per-point result.
if (k2 == 0 || result[j] > best){
result[j] = best;
result_i[j] = best_i;
result_t[j] = best_t;
}
}
__syncthreads(); // all reads done before the next tile overwrites the buffers
}
}
// Backward pass of the unbatched point-to-triangle distance.
// Re-derives, per point, the configuration recorded by the forward pass
// (closest face idx[point_id], feature class dist_type[point_id]) and
// chains grad_output through the corresponding squared-distance formula.
//   grad_input_p      : (n, 3) gradient w.r.t. the query points
//                       (exclusive per-point writes).
//   grad_input_v1/2/3 : (m, 3) gradients w.r.t. the vertex tensors,
//                       accumulated with atomicAdd since many points can
//                       select the same face.
// No block-level synchronization is needed: the kernel uses no shared
// memory.
template<typename scalar_t, typename vector_t>
__global__ void UnbatchedTriangleDistanceBackwardKernel(
    const scalar_t* grad_output,
    const scalar_t* points,
    const scalar_t* verts_1,
    const scalar_t* verts_2,
    const scalar_t* verts_3,
    const int n, // num of points
    const int m, // num of faces
    const int64_t* idx,
    const int* dist_type,
    scalar_t* grad_input_p,
    scalar_t* grad_input_v1,
    scalar_t* grad_input_v2,
    scalar_t* grad_input_v3)
{
    // Grid-stride loop. The stride must be blockDim.x * gridDim.x: striding
    // by blockDim.x alone (as the previous revision did) makes every block
    // walk over (almost) the entire point range, so the atomically
    // accumulated vertex gradients were counted once per block instead of
    // once per point whenever more than one block was launched.
    for (int point_id = threadIdx.x + blockIdx.x * blockDim.x; point_id < n; point_id += blockDim.x * gridDim.x) {
        int type = dist_type[point_id];
        int64_t mesh_idx = idx[point_id];
        vector_t p = make_vectorize<scalar_t, vector_t>(points[point_id * 3], points[point_id * 3 + 1], points[point_id * 3 + 2]);
        vector_t v1 = make_vectorize<scalar_t, vector_t>(verts_1[mesh_idx * 3], verts_1[mesh_idx * 3 + 1], verts_1[mesh_idx * 3 + 2]);
        vector_t v2 = make_vectorize<scalar_t, vector_t>(verts_2[mesh_idx * 3], verts_2[mesh_idx * 3 + 1], verts_2[mesh_idx * 3 + 2]);
        vector_t v3 = make_vectorize<scalar_t, vector_t>(verts_3[mesh_idx * 3], verts_3[mesh_idx * 3 + 1], verts_3[mesh_idx * 3 + 2]);
        vector_t v21 = substract<scalar_t, vector_t>(v2, v1); // edge v2 - v1
        vector_t v32 = substract<scalar_t, vector_t>(v3, v2); // edge v3 - v2
        vector_t v13 = substract<scalar_t, vector_t>(v1, v3); // edge v1 - v3
        vector_t p1 = substract<scalar_t, vector_t>(p, v1); // point relative to v1
        vector_t p2 = substract<scalar_t, vector_t>(p, v2); // point relative to v2
        vector_t p3 = substract<scalar_t, vector_t>(p, v3); // point relative to v3
        scalar_t grad = grad_output[point_id];
        if (type == 0) { // closest to edge v21
            compute_edge_backward(v21, p1, grad_input_v2, grad_input_v1, grad_input_p, point_id, mesh_idx, grad);
        } else if (type == 1) { // closest to edge v32
            compute_edge_backward(v32, p2, grad_input_v3, grad_input_v2, grad_input_p, point_id, mesh_idx, grad);
        } else if (type == 2) { // closest to edge v13
            compute_edge_backward(v13, p3, grad_input_v1, grad_input_v3, grad_input_p, point_id, mesh_idx, grad);
        } else if (type == 3) { // closest to the face interior
            // Forward: dist = (i . p1)^2 / |i|^2 with i = v21 x v13.
            vector_t i = cross<scalar_t, vector_t>(v21, v13);
            scalar_t j = dot2<scalar_t, vector_t>(i);
            scalar_t k = dot<scalar_t,vector_t>(i, p1);
            // Reverse-mode sweep; *_bar denotes d(loss)/d(*). The forward
            // distance value itself is not needed for the gradient.
            scalar_t k_bar = ((2 * k) / j) * grad;
            scalar_t j_bar = - ((k / j) * (k / j)) * grad;
            vector_t dk_di = p1;
            vector_t dj_di = make_vectorize<scalar_t, vector_t>(2 * i.x, 2 * i.y, 2 * i.z);
            vector_t i_bar = add<scalar_t, vector_t>(dj_di * j_bar, dk_di * k_bar);
            vector_t dk_dp1 = make_vectorize<scalar_t, vector_t>(i.x, i.y, i.z);
            vector_t p1_bar = dk_dp1 * k_bar;
            // Columns of the cross-product Jacobian w.r.t. v21.
            vector_t di_dv21_x = make_vectorize<scalar_t, vector_t>(0, -v13.z, v13.y);
            vector_t di_dv21_y = make_vectorize<scalar_t, vector_t>(v13.z, 0, -v13.x);
            vector_t di_dv21_z = make_vectorize<scalar_t, vector_t>(-v13.y, v13.x, 0);
            scalar_t v21_bar_x = dot<scalar_t, vector_t>(i_bar, di_dv21_x);
            scalar_t v21_bar_y = dot<scalar_t, vector_t>(i_bar, di_dv21_y);
            scalar_t v21_bar_z = dot<scalar_t, vector_t>(i_bar, di_dv21_z);
            vector_t v21_bar = make_vectorize<scalar_t, vector_t>(v21_bar_x, v21_bar_y, v21_bar_z);
            // Columns of the cross-product Jacobian w.r.t. v13.
            vector_t di_dv13_x = make_vectorize<scalar_t, vector_t>(0, v21.z, -v21.y);
            vector_t di_dv13_y = make_vectorize<scalar_t, vector_t>(-v21.z, 0, v21.x);
            vector_t di_dv13_z = make_vectorize<scalar_t, vector_t>(v21.y, -v21.x, 0);
            scalar_t v13_bar_x = dot<scalar_t, vector_t>(i_bar, di_dv13_x);
            scalar_t v13_bar_y = dot<scalar_t, vector_t>(i_bar, di_dv13_y);
            scalar_t v13_bar_z = dot<scalar_t, vector_t>(i_bar, di_dv13_z);
            vector_t v13_bar = make_vectorize<scalar_t, vector_t>(v13_bar_x, v13_bar_y, v13_bar_z);
            // Chain into the vertices: v21 = v2 - v1, v13 = v1 - v3, p1 = p - v1.
            vector_t v1_bar_v21 = make_vectorize<scalar_t, vector_t>(-v21_bar.x, -v21_bar.y, -v21_bar.z);
            vector_t v1_bar_v13 = make_vectorize<scalar_t, vector_t>(v13_bar.x, v13_bar.y, v13_bar.z);
            vector_t v1_bar_p1 = make_vectorize<scalar_t, vector_t>(-p1_bar.x, -p1_bar.y, -p1_bar.z);
            vector_t v1_bar = add<scalar_t, vector_t>(add<scalar_t, vector_t>(v1_bar_v13, v1_bar_v21), v1_bar_p1);
            vector_t v2_bar = make_vectorize<scalar_t, vector_t>(v21_bar.x, v21_bar.y, v21_bar.z);
            vector_t v3_bar = make_vectorize<scalar_t, vector_t>(-v13_bar.x, -v13_bar.y, -v13_bar.z);
            vector_t p_bar = p1_bar;
            grad_input_p[point_id * 3] = p_bar.x;
            grad_input_p[point_id * 3 + 1] = p_bar.y;
            grad_input_p[point_id * 3 + 2] = p_bar.z;
            atomicAdd(&(grad_input_v1[mesh_idx * 3]), v1_bar.x);
            atomicAdd(&(grad_input_v1[mesh_idx * 3 + 1]), v1_bar.y);
            atomicAdd(&(grad_input_v1[mesh_idx * 3 + 2]), v1_bar.z);
            atomicAdd(&(grad_input_v2[mesh_idx * 3]), v2_bar.x);
            atomicAdd(&(grad_input_v2[mesh_idx * 3 + 1]), v2_bar.y);
            atomicAdd(&(grad_input_v2[mesh_idx * 3 + 2]), v2_bar.z);
            atomicAdd(&(grad_input_v3[mesh_idx * 3]), v3_bar.x);
            atomicAdd(&(grad_input_v3[mesh_idx * 3 + 1]), v3_bar.y);
            atomicAdd(&(grad_input_v3[mesh_idx * 3 + 2]), v3_bar.z);
        }
        // NOTE: the previous revision called __syncthreads() here; with no
        // shared memory it was unnecessary, and inside this divergent loop
        // (threads exit at different trip counts) it was undefined behavior.
    }
}
// Host-side launcher for the forward nearest-triangle kernel.
// Dispatches on the scalar type of `points` and launches a fixed 32x16
// grid of 512-thread blocks; points are strided over the grid's y
// dimension inside the kernel.
// NOTE(review): no cudaGetLastError() check after the launch — launch
// errors would only surface at the next synchronizing call.
void unbatched_triangle_distance_forward_cuda_kernel_launcher(
const at::Tensor points,
const at::Tensor verts_1,
const at::Tensor verts_2,
const at::Tensor verts_3,
const at::Tensor dist1,
const at::Tensor idx1,
const at::Tensor type1) {
DISPATCH_NUM_TYPES(points.scalar_type(), scalar_t, "unbatched_triangle_distance", [&] {
using vector_t = ScalarTypeToVec3Type<scalar_t>::type;
UnbatchedTriangleDistanceKernel<scalar_t, vector_t><<<dim3(32,16,1),512>>>(
points.data_ptr<scalar_t>(), verts_1.data_ptr<scalar_t>(), verts_2.data_ptr<scalar_t>(),
verts_3.data_ptr<scalar_t>(), points.size(0), verts_1.size(0), dist1.data_ptr<scalar_t>(),
idx1.data_ptr<int64_t>(), type1.data_ptr<int>());
});
}
// Host-side launcher for the backward kernel: one thread per point,
// BLOCK_SIZE threads per block, enough blocks to cover all points
// (ceil-division).
// NOTE(review): no cudaGetLastError() check after the launch — launch
// errors would only surface at the next synchronizing call.
void unbatched_triangle_distance_backward_cuda_kernel_launcher(
const at::Tensor grad_output,
const at::Tensor points,
const at::Tensor verts_1,
const at::Tensor verts_2,
const at::Tensor verts_3,
const at::Tensor idx,
const at::Tensor dist_type,
const at::Tensor grad_input_p,
const at::Tensor grad_input_v1,
const at::Tensor grad_input_v2,
const at::Tensor grad_input_v3) {
DISPATCH_NUM_TYPES(points.scalar_type(), scalar_t, "unbatched_triangle_distance", [&] {
using vector_t = ScalarTypeToVec3Type<scalar_t>::type;
int n = points.size(0);
int num_blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
UnbatchedTriangleDistanceBackwardKernel<scalar_t, vector_t><<<num_blocks, BLOCK_SIZE>>>(
grad_output.data_ptr<scalar_t>(), points.data_ptr<scalar_t>(), verts_1.data_ptr<scalar_t>(),
verts_2.data_ptr<scalar_t>(), verts_3.data_ptr<scalar_t>(), points.size(0), verts_1.size(0),
idx.data_ptr<int64_t>(), dist_type.data_ptr<int>(), grad_input_p.data_ptr<scalar_t>(),
grad_input_v1.data_ptr<scalar_t>(), grad_input_v2.data_ptr<scalar_t>(), grad_input_v3.data_ptr<scalar_t>());
});
}
} // namespace kaolin
|
c9d4debb6f4203f5737a75dd2acc79b10ef80e51.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <distributed/distributed_manager.h>
#include <distributed/comms_mpi_gpudirect.h>
#include <distributed/comms_mpi_hostbuffer_stream.h>
#include <distributed/comms_visitors.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/remove.h>
#include <thrust/unique.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust_wrapper.h>
#include <basic_types.h>
#include <error.h>
#include <util.h>
#include <types.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include <assert.h>
#include "hip/hip_runtime.h"
#include "reorder_partition.h"
#include "amgx_types/util.h"
#include <algorithm>
#include <iostream> //debug only:
// Unary predicate: true iff a partition id equals the locally owned one.
// Usable from both host and device code (e.g. thrust::count_if / copy_if).
struct is_my_part : public thrust::unary_function<int, bool>
{
    const int _my_part; // partition id this predicate matches against

    is_my_part(int my_part) : _my_part(my_part) { }

    __host__ __device__
    bool operator()(const int part)
    {
        return _my_part == part;
    }
};
using namespace std;
namespace amgx
{
static int insertDiagonals = 1;
// Flattened (row-major in x, then y, then z) index of grid point (i,j,k)
// inside a local nx-by-ny-by-nz box. nz is accepted for symmetry but does
// not participate in the computation.
template <typename index_type>
static __device__ __forceinline__
index_type internal_index(index_type i, index_type j, index_type k, index_type nx, index_type ny, index_type nz)
{
    const index_type plane = nx * ny; // points per z-slab
    return i + nx * j + plane * k;
}
// First global row index owned by the rank at coordinate (p,q,r) in a
// P-by-Q-by-R rank grid, assuming every rank owns exactly num_rows rows.
// The product is formed in 64-bit to avoid overflow for large problems.
template <typename index_type>
static __device__ __forceinline__
int64_t get_global_offset(index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows)
{
    // Linearize the rank coordinate: x fastest, then y, then z.
    int rank_id = p + P * (q + Q * r);
    return ((int64_t) rank_id) * ((int64_t) num_rows);
}
// Compute, per local row, the number of nonzeros of a 7-point Poisson stencil
// row: 7 minus one for every face of the GLOBAL domain boundary the point
// touches (faces shared with a neighboring rank keep their halo entry).
// Grid-stride loop; any launch configuration is valid.
template <typename index_type>
__global__
void poisson7pt_count_row_len(index_type *row_len, index_type nx, index_type ny, index_type nz, index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows)
{
    for (int tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < num_rows; tidx += blockDim.x * gridDim.x)
    {
        // Recover the (i,j,k) coordinate of this row inside the local box.
        int i = tidx % nx;                         // x position
        int j = (tidx / nx) % ny;                  // y position
        int k = tidx / (nx * ny);                  // z position
        // Count missing neighbors: a face is missing only when the point sits
        // on the local box edge AND this rank is on the global grid edge.
        int missing = ((i == 0) && (p == 0))
                      + ((i == nx - 1) && (p == P - 1))
                      + ((j == 0) && (q == 0))
                      + ((j == ny - 1) && (q == Q - 1))
                      + ((k == 0) && (r == 0))
                      + ((k == nz - 1) && (r == R - 1));
        row_len[tidx] = 7 - missing;
    }
}
// Fill column indices, values, diagonal positions, and the local->global halo
// map for a distributed 7-point Poisson matrix. One thread per row
// (grid-stride). Within a row the entries are emitted in a fixed order:
// diagonal first, then the i-1, i+1, j-1, j+1, k-1, k+1 neighbors (skipping
// those absent at the global boundary). Halo columns are numbered starting at
// num_rows; halo_offset advances past each face's halo slab in the same fixed
// order so every rank computes a consistent halo layout.
// NOTE(review): row_offsets must already contain the exclusive scan of the
// lengths produced by poisson7pt_count_row_len — TODO confirm at call site.
template <typename index_type, typename mat_value_type>
__global__
void poisson7pt_set_col_values(const index_type *__restrict__ row_offsets, index_type *__restrict__ col_indices, mat_value_type *__restrict__ values, index_type *__restrict__ diag, int64_t *__restrict__ local_to_global, index_type nx, index_type ny, index_type nz, index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows)
{
for (int row = threadIdx.x + blockIdx.x * blockDim.x; row < num_rows ; row += blockDim.x * gridDim.x)
{
/* compute p,q,r from P,Q,R and myid */
int i = row % nx; // Position in x direction
int j = (( row - i) / nx) % ny; // Position in y
int k = ( row - i - nx * j) / ( nx * ny ); // Position in z
// Halo columns start right after the owned rows; halo_offset tracks the
// start of the current face's halo slab.
int halo_offset = num_rows;
int pos = row_offsets[row];
// Diagonal element
diag[row] = pos;
col_indices[pos] = row;
values[pos++] = types::util<mat_value_type>::get_one() * 6.;
// ----------------------------
// Neighbor at position i-1
// ----------------------------
if (i)
{
// Has a i-1 neighbor, which is an internal node at position (i-1,j,k)
col_indices[pos] = internal_index(i - 1, j, k, nx, ny, nz);
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
}
else if (p)
{
// Has a i-1 neighbor, which is a halo node
int halo_index = halo_offset + k * ny + j;
col_indices[pos] = halo_index;
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
// Record which global row this halo column refers to (last x-slice of
// the p-1 neighbor rank).
int64_t global_offset = get_global_offset(p - 1, q, r, P, Q, R, num_rows);
local_to_global[halo_index - num_rows] = global_offset + internal_index(nx - 1, j, k, nx, ny, nz);
}
// Advance past the -x halo face (present whenever a p-1 neighbor exists).
if (p)
{
halo_offset += ny * nz;
}
// ----------------------------
// Neighbor at position i+1
// ----------------------------
if (i < nx - 1)
{
// Has i+1 neighbor, which is an internal node at position (i+1,j,k)
col_indices[pos] = internal_index(i + 1, j, k, nx, ny, nz);
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
}
else
{
if (p < P - 1)
{
// Has i+1 neighbor, which is a halo node
int halo_index = halo_offset + k * ny + j;
col_indices[pos] = halo_index;
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
int64_t global_offset = get_global_offset(p + 1, q, r, P, Q, R, num_rows);
local_to_global[halo_index - num_rows] = global_offset + internal_index(0, j, k, nx, ny, nz);
}
}
if (p < P - 1)
{
halo_offset += ny * nz;
}
// ----------------------------
// Neighbor at position j-1
// ----------------------------
if (j)
{
// Has a j-1 neighbor, which is an internal node at position (i,j-1,k)
col_indices[pos] = internal_index(i, j - 1, k, nx, ny, nz);
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
}
else if (q)
{
// Has a j-1 neighbor, which is a halo node
int halo_index = halo_offset + k * nx + i;
col_indices[pos] = halo_index;
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
int64_t global_offset = get_global_offset(p, q - 1, r, P, Q, R, num_rows);
local_to_global[halo_index - num_rows] = global_offset + internal_index(i, ny - 1, k, nx, ny, nz);
}
if (q)
{
halo_offset += nx * nz;
}
// ----------------------------
// Neighbor at position j+1
// ----------------------------
if (j < ny - 1)
{
// Has a j+1 neighbor, which is an internal node at position (i,j+1,k)
col_indices[pos] = internal_index(i, j + 1, k, nx, ny, nz);
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
}
else
{
if (q < Q - 1)
{
// Has a j+1 neighbor, which is a halo node
int halo_index = halo_offset + k * nx + i;
col_indices[pos] = halo_index;
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
int64_t global_offset = get_global_offset(p, q + 1, r, P, Q, R, num_rows);
local_to_global[halo_index - num_rows] = global_offset + internal_index(i, 0, k, nx, ny, nz);
}
}
if (q < Q - 1)
{
halo_offset += nx * nz;
}
// ----------------------------
// Neighbor at position k-1
// ----------------------------
if (k)
{
// Has a k-1 neighbor, which is an internal node at position (i,j,k-1)
col_indices[pos] = internal_index(i, j, k - 1, nx, ny, nz);
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
}
else if (r)
{
// Has a k-1 neighbor, which is a halo node
int halo_index = halo_offset + j * nx + i;
col_indices[pos] = halo_index;
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
int64_t global_offset = get_global_offset(p, q, r - 1, P, Q, R, num_rows);
local_to_global[halo_index - num_rows] = global_offset + internal_index(i, j, nz - 1, nx, ny, nz);
}
if (r)
{
halo_offset += nx * ny;
}
// ----------------------------
// Neighbor at position k+1
// ----------------------------
if (k < nz - 1)
{
// Has a k+1 neighbor, which is an internal node at position (i,j,k+1)
col_indices[pos] = internal_index(i, j, k + 1, nx, ny, nz);
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
}
else
{
if (r < R - 1)
{
// Has a k+1 neighbor, which is a halo node
int halo_index = halo_offset + j * nx + i;
col_indices[pos] = halo_index;
values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
int64_t global_offset = get_global_offset(p, q, r + 1, P, Q, R, num_rows);
local_to_global[halo_index - num_rows] = global_offset + internal_index(i, j, 0, nx, ny, nz);
}
}
if (r < R - 1)
{
halo_offset += nx * ny;
}
}
}
// Initialize the halo rows [n, total_rows) as single-entry rows: the column
// points at the row itself and every value component is set to one (the
// actual numbers are irrelevant; they are placeholders). Grid-stride loop.
template <typename mat_value_type>
__global__
void set_halo_cols_values(int *row_offsets, int *col_indices, mat_value_type *values, int n, int total_rows, int bsize)
{
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < total_rows - n; tid += gridDim.x * blockDim.x)
    {
        int row = n + tid;
        int offset = row_offsets[row];
        col_indices[offset] = row;
#pragma unroll
        for (int i = 0; i < bsize; i++)
        {
            values[offset * bsize + i] = types::util<mat_value_type>::get_one(); // This is arbitrary
        }
    }
}
// Scatter each owned row's length into the consolidated row-offset array at
// the position given by its new (root) row id. Threads are launched over the
// full range [0, n + total_num_halos) but only owned rows (tid < n) write;
// passing a non-NULL diag adds one entry per row for the explicit diagonal.
template <typename mat_value_type>
__global__
void zero_copy_row_lengths_ids_offsets(int *d_old_row_offsets, int *root_row_offsets, int *d_row_ids, int n, int total_num_halos, mat_value_type *diag)
{
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < n + total_num_halos; tid += gridDim.x * blockDim.x)
    {
        int new_row_id = d_row_ids[tid];
        if (tid < n)
        {
            int start = d_old_row_offsets[tid];
            int row_length = d_old_row_offsets[tid + 1] - start; // zero-copy
            if (diag != NULL) // will insert the diagonal
            {
                row_length++;
            }
            root_row_offsets[new_row_id] = row_length;
        }
    }
}
// Copy a partition's matrix rows into their consolidated positions.
// For each row: optionally prepend the external diagonal block (when
// h_old_diag != NULL), then copy the off-diagonal entries, remapping every
// column index through row_ids. One thread per row, grid-stride.
template< typename mat_value_type>
__global__
void ipc_consolidation_upload_matrix(int num_rows, int *row_ids, const int *old_row_offsets, int *new_row_offsets, const int *h_old_col_indices, int *new_col_indices, const mat_value_type *h_old_values, mat_value_type *new_values, const mat_value_type *h_old_diag, int bsize)
{
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < num_rows; row += gridDim.x * blockDim.x)
    {
        int new_row = row_ids[row];
        int src_base = old_row_offsets[row];
        int dst_base = new_row_offsets[new_row];
        // Diagonal (if stored externally) becomes the first entry of the row.
        if (h_old_diag != NULL)
        {
            new_col_indices[dst_base] = new_row;
#pragma unroll
            for (int j = 0; j < bsize; j++)
            {
                new_values[dst_base * bsize + j] = h_old_diag[row * bsize + j];
            }
            dst_base++; // subsequent entries land after the diagonal
        }
        int row_nnz = old_row_offsets[row + 1] - src_base;
        for (int i = 0; i < row_nnz; i++)
        {
            // Remap the column index into the consolidated numbering.
            new_col_indices[dst_base + i] = row_ids[h_old_col_indices[src_base + i]];
#pragma unroll
            for (int j = 0; j < bsize; j++)
            {
                new_values[(dst_base + i) * bsize + j] = h_old_values[(src_base + i) * bsize + j];
            }
        }
    }
}
// Same traversal as ipc_consolidation_upload_matrix, but only the numerical
// values are refreshed — the consolidated sparsity pattern (column indices)
// is assumed to be in place already. One thread per row, grid-stride.
template< typename mat_value_type>
__global__
void ipc_consolidation_replace_values(int num_rows, int *row_ids, const int *old_row_offsets, int *new_row_offsets, const mat_value_type *h_old_values, mat_value_type *new_values, const mat_value_type *h_old_diag, int bsize)
{
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < num_rows; row += gridDim.x * blockDim.x)
    {
        int new_row = row_ids[row];
        int src_base = old_row_offsets[row];
        int dst_base = new_row_offsets[new_row];
        // Externally-stored diagonal occupies the first slot of the row.
        if (h_old_diag != NULL)
        {
#pragma unroll
            for (int j = 0; j < bsize; j++)
            {
                new_values[dst_base * bsize + j] = h_old_diag[row * bsize + j];
            }
            dst_base++;
        }
        int row_nnz = old_row_offsets[row + 1] - src_base;
        for (int i = 0; i < row_nnz; i++)
        {
#pragma unroll
            for (int j = 0; j < bsize; j++)
            {
                new_values[(dst_base + i) * bsize + j] = h_old_values[(src_base + i) * bsize + j];
            }
        }
    }
}
// Set flags[id - offset] = 1 for every id in the list. Grid-stride loop.
// NOTE(review): the 'upper' parameter is not used in this body; it is kept
// to preserve the kernel's signature for existing call sites.
__global__ void flag_halo_ids_kernel(INDEX_TYPE *flags, INDEX_TYPE *ids, INDEX_TYPE offset, INDEX_TYPE size, INDEX_TYPE upper)
{
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < size; idx += blockDim.x * gridDim.x)
    {
        flags[ids[idx] - offset] = 1;
    }
}
// Replace each id in-place with its flag-array entry: ids[i] becomes
// flags[ids[i] - offset]. Grid-stride loop.
__global__ void read_halo_ids_kernel(INDEX_TYPE *flags, INDEX_TYPE *ids, INDEX_TYPE offset, INDEX_TYPE size)
{
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < size; idx += blockDim.x * gridDim.x)
    {
        ids[idx] = flags[ids[idx] - offset];
    }
}
// Permute a block vector: dst[map[row]] = src[row], where each "element" is a
// block of 'blocksize' scalars. Groups of 'blocksize' consecutive threads
// cooperate on one row; threads beyond the last full group exit immediately.
template<class T>
__global__ void reorder_vector_values(T *dst, const T *src, const INDEX_TYPE *map, INDEX_TYPE blocksize, INDEX_TYPE num_rows)
{
    int rows_per_cta = blockDim.x / blocksize;     // full groups per thread block
    int vec_id = threadIdx.x % blocksize;          // scalar slot within the block
    if (threadIdx.x >= rows_per_cta * blocksize) { return; } // leftover threads idle
    for (int row = blockIdx.x * rows_per_cta + threadIdx.x / blocksize;
            row < num_rows;
            row += gridDim.x * rows_per_cta)
    {
        dst[map[row] * blocksize + vec_id] = src[row * blocksize + vec_id];
    }
}
// Inverse permutation of reorder_vector_values: dst[row] = src[map[row]],
// block-wise with 'blocksize' scalars per element. Same cooperative layout:
// 'blocksize' consecutive threads handle one row; extras return early.
template<class T>
__global__ void inverse_reorder_vector_values(T *dst, T *src, INDEX_TYPE *map, INDEX_TYPE blocksize, INDEX_TYPE num_rows)
{
    int rows_per_cta = blockDim.x / blocksize;
    int vec_id = threadIdx.x % blocksize;
    if (threadIdx.x >= rows_per_cta * blocksize) { return; }
    for (int row = blockIdx.x * rows_per_cta + threadIdx.x / blocksize;
            row < num_rows;
            row += gridDim.x * rows_per_cta)
    {
        dst[row * blocksize + vec_id] = src[map[row] * blocksize + vec_id];
    }
}
// Clear the flag of every node listed in a boundary map (marks it as a
// boundary node). Concurrent duplicate writes all store the same 0, so the
// race is benign. Grid-stride loop.
__global__ void remove_boundary_kernel(INDEX_TYPE *flags, INDEX_TYPE *maps, INDEX_TYPE size)
{
    for (int element = blockIdx.x * blockDim.x + threadIdx.x; element < size; element += blockDim.x * gridDim.x)
    {
        flags[maps[element]] = 0;
    }
}
// For each B2L map entry that refers to a not-yet-assigned node (flag == 0),
// claim it (flag = 1) and mark output[idx] = 1 so a later scan can assign it
// a local number. Entries pointing past uf_size are ignored.
__global__ void get_unassigned_kernel(INDEX_TYPE *unassigned_flags, INDEX_TYPE *map, INDEX_TYPE *output, INDEX_TYPE part_size, INDEX_TYPE uf_size )
{
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < part_size; idx += blockDim.x * gridDim.x)
    {
        INDEX_TYPE node = map[idx];
        if (node < uf_size && unassigned_flags[node] == 0)
        {
            unassigned_flags[node] = 1;
            output[idx] = 1;
        }
    }
}
// Second phase of boundary renumbering: nodes flagged in the previous pass
// receive max_element + part_num[idx] (their scanned local number), and every
// in-range B2L map entry is rewritten through the renumbering table.
__global__ void set_unassigned_kernel(INDEX_TYPE *part_assigned_flags, INDEX_TYPE *part_num, INDEX_TYPE *map, INDEX_TYPE *renum, INDEX_TYPE part_size, INDEX_TYPE max_element, INDEX_TYPE renum_size /*, INDEX_TYPE rank*/)
{
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < part_size; idx += blockDim.x * gridDim.x)
    {
        INDEX_TYPE node = map[idx];
        if (node < renum_size)
        {
            // Newly-claimed nodes get the next free numbers after max_element.
            if (part_assigned_flags[idx] == 1)
            {
                renum[node] = max_element + part_num[idx];
            }
            // Also update the B2L map with the renumbered id.
            map[idx] = renum[node];
        }
    }
}
// Rewrite every B2L map entry through the renumbering table:
// map[idx] = renum[map[idx]] for entries within [0, renum_size).
// Grid-stride loop over part_size entries.
__global__ void renumber_b2l_maps(INDEX_TYPE *map, INDEX_TYPE *renum, INDEX_TYPE part_size, INDEX_TYPE renum_size)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    while (idx < part_size)
    {
        if (map[idx] < renum_size)
        {
            //update the B2L map
            map[idx] = renum[map[idx]];
        }
        // BUG FIX: the stride increment must execute on every iteration. It
        // previously sat inside the if-branch, so a thread whose map entry
        // was >= renum_size never advanced and spun forever on the same idx
        // (every sibling kernel in this file increments unconditionally).
        idx += blockDim.x * gridDim.x;
    }
}
// Build the inverse permutation: irenum[renum[i]] = i. Out-of-range entries
// indicate a corrupted renumbering and are reported via device printf (the
// write still happens, matching historical behavior). Grid-stride loop.
__global__ void calc_inverse_renumbering(INDEX_TYPE *renum, INDEX_TYPE *irenum, INDEX_TYPE max_element)
{
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < max_element; idx += blockDim.x * gridDim.x)
    {
        INDEX_TYPE target = renum[idx];
        if (target < 0 || target >= max_element) { printf("Renumbering error: %d %d\n", renum[idx], max_element); }
        irenum[target] = idx;
    }
}
// For each node in the list, record its local halo index in a dense mapping
// table indexed by (global id - base_index): mapping[g - base] = map_offset + pos.
__global__ void create_halo_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, int64_t base_index, INDEX_TYPE map_offset, INDEX_TYPE size)
{
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < size; row += blockDim.x * gridDim.x)
    {
        mapping[node_list[row] - base_index] = map_offset + row;
    }
}
// Like create_halo_mapping, but the mapped value comes from the B2L map
// instead of a running offset: mapping[node_list[i] - base] = b2l_map[i].
__global__ void apply_h2l2b_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, int64_t base_index, INDEX_TYPE *b2l_map, INDEX_TYPE size)
{
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < size; row += blockDim.x * gridDim.x)
    {
        mapping[node_list[row] - base_index] = b2l_map[row];
    }
}
// Remap column indices in-place through 'mapping' and count the surviving
// (mapped >= 0) entries per row; unmapped columns become -1. 'coop' threads
// cooperate on each row; per-thread partial counts are combined via a
// shared-memory tree reduction, then row_length[row] gets the count plus
// insert_diagonal. Requires dynamic shared memory of blockDim.x ints.
// NOTE(review): the reduction loop starts at s = 2, which only covers
// coop <= 4 — confirm the template is never instantiated with larger coop.
// NOTE(review): __syncthreads() executes inside the row loop; all threads of
// the block appear to share the same trip count since row strides uniformly,
// but verify num_rows coverage keeps the block convergent.
template <int coop>
__global__ void map_col_indices_and_count_rowlen(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *row_length,
INDEX_TYPE *mapping, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal)
{
extern __shared__ volatile int reduction[];
int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
int coopIdx = threadIdx.x % coop;
while (row < num_rows)
{
int valid = 0;
for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += coop) //this may look horrible, but I expect low branch divergence, because col indices in a row usually belong to the same partition (or at most one more)
{
int colIdx = col_indices[idx];
int new_col_idx = mapping[colIdx];
if (new_col_idx >= 0)
{
valid++;
col_indices[idx] = new_col_idx;
}
else
{
col_indices[idx] = -1;
}
}
// Tree-reduce the per-thread counts within each coop group.
reduction[threadIdx.x] = valid;
for (int s = 2; s > 0; s >>= 1)
{
if (coopIdx < s)
{
reduction[threadIdx.x] += reduction[threadIdx.x + s];
}
__syncthreads();
}
if (coopIdx == 0)
{
row_length[row] = reduction[threadIdx.x] + insert_diagonal;
}
row += gridDim.x * blockDim.x / coop;
}
}
// Renumber the column indices of P that refer to owned coarse points;
// columns at or beyond num_owned_coarse_pts (halo columns) are untouched.
// Iterates over the first num_owned_fine_pts entries of col_indices.
__global__ void renumber_P_col_indices(INDEX_TYPE *__restrict__ col_indices, const INDEX_TYPE *__restrict__ renum, INDEX_TYPE num_owned_coarse_pts, INDEX_TYPE num_owned_fine_pts)
{
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < num_owned_fine_pts; idx += blockDim.x * gridDim.x)
    {
        INDEX_TYPE col_id = col_indices[idx];
        if (col_id < num_owned_coarse_pts)
        {
            col_indices[idx] = renum[col_id];
        }
    }
}
// Copy R's rows into reordered positions: owned rows (row < num_owned_rows)
// move to rows[renumbering[row]], halo rows stay at their original offset.
// 'coop' threads cooperate per row, striding over its values and columns.
template <int coop, class T>
__global__ void reorder_R_matrix(const INDEX_TYPE *old_rows, const INDEX_TYPE *old_cols, const T *old_vals, const INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE num_owned_rows)
{
    int lane = threadIdx.x % coop;
    for (int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
            row < num_rows;
            row += blockDim.x * gridDim.x / coop)
    {
        INDEX_TYPE src_base = old_rows[row];
        INDEX_TYPE dst_base = (row < num_owned_rows) ? rows[renumbering[row]] : src_base;
        INDEX_TYPE row_nnz = old_rows[row + 1] - src_base;
        // Copy the value blocks (bsize scalars per nonzero), then the columns.
        for (int i = lane; i < row_nnz * bsize; i += coop)
        {
            vals[dst_base * bsize + i] = old_vals[src_base * bsize + i];
        }
        for (int i = lane; i < row_nnz; i += coop)
        {
            cols[dst_base + i] = old_cols[src_base + i];
        }
    }
}
// Copy an entire matrix into renumbered row positions, optionally inserting
// the diagonal as the first entry of each destination row. 'coop' threads
// cooperate per row. When insert_diagonal is set, the diagonal values are
// read from old_vals starting at offset old_rows[num_rows] — i.e. the
// diagonal blocks are stored contiguously right after the off-diagonal
// values in the source array (DIAG storage layout).
template <int coop, class T>
__global__ void reorder_whole_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal)
{
int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
int coopIdx = threadIdx.x % coop;
while (row < num_rows)
{
INDEX_TYPE src_base = old_rows[row];
INDEX_TYPE dst_base = rows[renumbering[row]];
if (insert_diagonal)
{
// Diagonal column is the renumbered row id; its value block lives in
// the trailing diagonal section of old_vals.
if (coopIdx == 0) { cols[dst_base] = renumbering[row]; }
for (int i = coopIdx; i < bsize; i += coop)
{
vals[dst_base * bsize + i] = old_vals[(old_rows[num_rows] + row) * bsize + i];
}
dst_base++;
}
// Copy value blocks, then column indices, for the row's nonzeros.
for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop)
{
vals[dst_base * bsize + i] = old_vals[src_base * bsize + i];
}
for (int i = coopIdx; i < old_rows[row + 1] - src_base; i += coop)
{
cols[dst_base + i] = old_cols[src_base + i];
}
row += blockDim.x * gridDim.x / coop;
}
}
// Refresh a reordered matrix's values from host-staged arrays, diagonal
// variant: each destination row receives its diagonal block from src_diag_h
// first, then the off-diagonal blocks from src_vals_h. Column indices are
// assumed already in place. 'coop' threads cooperate per row.
template <int coop, class T>
__global__ void replace_values_matrix(const T *src_vals_h, const T *src_diag_h, const INDEX_TYPE *old_rows, const INDEX_TYPE *rows, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows)
{
    int lane = threadIdx.x % coop;
    for (int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
            row < num_rows;
            row += blockDim.x * gridDim.x / coop)
    {
        INDEX_TYPE src_base = old_rows[row];
        INDEX_TYPE dst_base = rows[renumbering[row]];
        // Diagonal block first...
        for (int i = lane; i < bsize; i += coop)
        {
            vals[dst_base * bsize + i] = src_diag_h[row * bsize + i];
        }
        dst_base++;
        // ...then the row's off-diagonal value blocks.
        for (int i = lane; i < (old_rows[row + 1] - src_base) * bsize; i += coop)
        {
            vals[dst_base * bsize + i] = src_vals_h[src_base * bsize + i];
        }
    }
}
// Refresh a reordered matrix's values from a host-staged array, no-diagonal
// variant: off-diagonal blocks only, copied straight into the renumbered row
// positions. 'coop' threads cooperate per row.
template <int coop, class T>
__global__ void replace_values_matrix(const T *src_vals_h, const INDEX_TYPE *old_rows, const INDEX_TYPE *rows, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows)
{
    int lane = threadIdx.x % coop;
    for (int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
            row < num_rows;
            row += blockDim.x * gridDim.x / coop)
    {
        INDEX_TYPE src_base = old_rows[row];
        INDEX_TYPE dst_base = rows[renumbering[row]];
        for (int i = lane; i < (old_rows[row + 1] - src_base) * bsize; i += coop)
        {
            vals[dst_base * bsize + i] = src_vals_h[src_base * bsize + i];
        }
    }
}
//TODO: optimize by vectorizing
// Compact halo matrix rows into their destination positions, one thread per
// row. Optionally inserts the diagonal (column = global_offset + row) whose
// value block is read from the trailing diagonal section of old_vals at
// old_rows[halo_rows - local_offset] + local_offset + row (DIAG layout,
// shifted by this halo chunk's local_offset). Source columns marked -1
// (dropped during remapping) are skipped, so destination rows are dense.
template <class T>
__global__ void reorder_whole_halo_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals,
INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal,
INDEX_TYPE global_offset, INDEX_TYPE local_offset, INDEX_TYPE halo_rows)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
while (row < num_rows)
{
INDEX_TYPE src_base = old_rows[row];
INDEX_TYPE dst = rows[row];
if (insert_diagonal)
{
cols[dst] = global_offset + row;
for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(old_rows[halo_rows - local_offset] + local_offset + row) * bsize + j]; }
dst++;
}
// Copy only valid (non-negative) columns; invalid ones were removed
// during column remapping and are filtered out here.
for (int i = 0; i < old_rows[row + 1] - src_base; i++)
{
INDEX_TYPE colIdx = old_cols[src_base + i];
if (colIdx >= 0)
{
cols[dst] = colIdx;
for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(src_base + i) * bsize + j]; }
dst++;
}
}
row += blockDim.x * gridDim.x;
}
}
// Scatter each row's length (optionally +1 for an inserted diagonal) into
// the reordered position given by 'map'. Grid-stride loop over 'size' rows.
__global__ void calc_rowlen_reorder(INDEX_TYPE *row_offsets, INDEX_TYPE *row_len, INDEX_TYPE *map, INDEX_TYPE size, INDEX_TYPE insert_diag)
{
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < size; idx += blockDim.x * gridDim.x)
    {
        row_len[map[idx]] = row_offsets[idx + 1] - row_offsets[idx] + insert_diag;
    }
}
// Launch remove_boundary_kernel: clears the interior flag of every node in
// the B2L map. Grid capped at 4096 blocks; the kernel grid-strides.
template < class TConfig >
void DistributedManagerBase<TConfig>::remove_boundary(IVector_d &flagArray, IVector_d &B2L_map, int size)
{
    const int cta_size = 128;
    int num_blocks = min(4096, (size + cta_size - 1) / cta_size);
    hipLaunchKernelGGL(( remove_boundary_kernel) , dim3(num_blocks), dim3(cta_size), 0, 0, flagArray.raw(), B2L_map.raw(), size);
    cudaCheckError();
}
// Launch get_unassigned_kernel: flags B2L nodes not yet claimed by any
// earlier neighbor, recording them in partition_flags for a later scan.
template < class TConfig >
void DistributedManagerBase<TConfig>::get_unassigned(IVector_d &flagArray, IVector_d &B2L_map, IVector_d &partition_flags, int size, int global_size /*, int rank*/)
{
    const int cta_size = 192;
    int num_blocks = min(4096, (size + cta_size - 1) / cta_size);
    hipLaunchKernelGGL(( get_unassigned_kernel) , dim3(num_blocks), dim3(cta_size), 0, 0, flagArray.raw(),
                       B2L_map.raw(),
                       partition_flags.raw(), size, global_size /*, rank*/);
    cudaCheckError();
}
// Launch set_unassigned_kernel: assigns scanned numbers (offset by
// max_element) to newly-flagged boundary nodes and rewrites the B2L map
// through the renumbering table.
template < class TConfig >
void DistributedManagerBase<TConfig>::set_unassigned(IVector_d &partition_flags, IVector_d &partition_renum, IVector_d &B2L_map, IVector_d &renumbering, int size, int max_element, int global_size /*, int rank*/)
{
    const int cta_size = 192;
    int num_blocks = min(4096, (size + cta_size - 1) / cta_size);
    hipLaunchKernelGGL(( set_unassigned_kernel) , dim3(num_blocks), dim3(cta_size), 0, 0, partition_flags.raw(),
                       partition_renum.raw(),
                       B2L_map.raw(),
                       renumbering.raw(),
                       size, max_element, global_size /*,rank*/);
    cudaCheckError();
}
// Cache row/nonzero counts for the INTERIOR / OWNED / FULL / ALL matrix views
// from the (final) row_offsets array and the halo offsets. Skipped when the
// view size was fixed at creation (P and R matrices). With no neighbors the
// four views collapse to the owned partition.
template <class TConfig >
inline void DistributedManagerBase<TConfig>::set_initialized(IVector &row_offsets)
{
// For P and R sizes the sizes are fixed at creation
if(m_fixed_view_size)
{
return;
}
if (neighbors.size() > 0)
{
//distributed: cache num_rows/num_nz for different views
_num_rows_interior = _num_interior_nodes;
_num_nz_interior = row_offsets[_num_rows_interior];
_num_rows_owned = _num_interior_nodes + _num_boundary_nodes;
_num_nz_owned = row_offsets[_num_rows_owned];
_num_rows_full = halo_offsets[neighbors.size()];
// Guard against halo offsets extending past the row_offsets array
// (in that case the last valid offset is used).
if (_num_rows_full >= row_offsets.size())
{
_num_nz_full = row_offsets[row_offsets.size() - 1];
}
else
{
_num_nz_full = row_offsets[_num_rows_full];
}
_num_rows_all = halo_offsets[halo_offsets.size() - 1];
_num_nz_all = _num_nz_full;
}
else
{
// Single-partition case: every view equals the owned/interior part.
_num_rows_interior = _num_interior_nodes;
_num_nz_interior = row_offsets[_num_rows_interior];
_num_rows_owned = _num_interior_nodes;
_num_nz_owned = row_offsets[_num_rows_owned];
_num_rows_full = _num_rows_owned;
_num_nz_full = _num_nz_owned;
_num_rows_all = _num_rows_owned;
_num_nz_all = _num_nz_owned;
}
}
// Host-vector overload: forwards to the shared createAggRenumbering helper.
template <class TConfig >
void DistributedManagerBase<TConfig>::createAggregatesRenumbering(IVector_h &renumbering, IVector_h_vector &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings)
{
createAggRenumbering(renumbering, B2L_maps, size, num_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings);
}
// Device-vector overload: forwards to the shared createAggRenumbering helper.
template <class TConfig >
void DistributedManagerBase<TConfig>::createAggregatesRenumbering(IVector_d &renumbering, IVector_d_vector &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings)
{
createAggRenumbering(renumbering, B2L_maps, size, num_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings);
}
// Build a renumbering that places interior aggregates first, then boundary
// aggregates grouped per neighbor. Phase 1: mark boundary nodes via the B2L
// maps and exclusive-scan the interior flags. Phase 2: walk each neighbor's
// B2L map, number its not-yet-assigned boundary nodes, and rewrite the map.
// Outputs the interior/boundary aggregate counts through the references.
// Only num_rings == 1 is supported.
template <class TConfig >
template <class IVector_hd>
void DistributedManagerBase<TConfig>::createAggRenumbering(IVector_hd &renumbering, std::vector<IVector_hd> &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings)
{
if (num_rings != 1)
{
FatalError("num_rings > 1 not supported in consolidation", AMGX_ERR_NOT_IMPLEMENTED);
}
//int num_neighbors = this->neighbors.size();
if (num_neighbors == 0)
{
// No halo: everything is interior, identity renumbering is implied.
num_boundary_aggregates = 0;
num_interior_aggregates = size;
return;
}
//initial size to size+1 so we have the total size after a scan
int global_size = size;
renumbering.resize(size + 1);
//
// Step 1 - in the main matrix, separate interior and boundary nodes (1/0 in flagArray), renumber interior ones with an exclusive scan
//
IVector_hd flagArray(size + 1);
thrust::fill(flagArray.begin(), flagArray.begin() + size + 1, 1);
cudaCheckError();
//sets 1 for interior nodes, 0 for boundary node
for (int i = 0; i < num_neighbors; i++ )
{
int size = B2L_maps[i].size();
remove_boundary(flagArray, B2L_maps[i], size);
}
//gets the renumbering of interior nodes
thrust::exclusive_scan(flagArray.begin(), flagArray.begin() + size + 1, renumbering.begin());
cudaCheckError();
//
// Step 2 - Renumber nodes that are in the boundary, stepping through each B2L map, and renumbering ones that have not been renumbered yet
//
//what is the biggest B2L size
INDEX_TYPE max_size = 0;
for (int i = 0; i < num_neighbors; i++)
{
max_size = max_size > B2L_maps[i].size() ? max_size : B2L_maps[i].size();
}
//allocate work vectors (should be pretty small)
IVector_hd partition_flags(max_size);
IVector_hd partition_renum(max_size);
//the number of renumbered nodes so far
int max_element = renumbering[size];
num_interior_aggregates = max_element;
num_boundary_aggregates = size - max_element;
renumbering.resize(size);
for (int i = 0; i < num_neighbors; i++)
{
//find nodes that are part of the current boundary and they haven't been renumbered yet
thrust::fill(partition_flags.begin(), partition_flags.begin() + max_size, 0);
int size = B2L_maps[i].size();
get_unassigned(flagArray, B2L_maps[i], partition_flags, size, global_size/*,0*/);
//calculate the local renumbering (within this boundary region) of these nodes
thrust::exclusive_scan(partition_flags.begin(), partition_flags.begin() + max_size, partition_renum.begin());
//apply renumbering to the big numbering table
set_unassigned(partition_flags, partition_renum, B2L_maps[i], renumbering, size, max_element, global_size/*,0*/);
//update the number of renumbered nodes
max_element += partition_renum[max_size - 1] + partition_flags[max_size - 1];
}
cudaCheckError();
}
// Construct a distributed manager bound to matrix 'a': binds the public
// reference members to their backing storage, creates the interior/boundary
// HIP streams and the comm event, then creates the communicator and caches
// this rank's global id and the partition count.
template <class TConfig>
inline DistributedManagerBase<TConfig>::DistributedManagerBase(Matrix<TConfig> &a) :
m_fine_level_comms(NULL), A(&a), m_pinned_buffer_size(0), m_pinned_buffer(NULL), _num_interior_nodes(0), _num_boundary_nodes(0), _comms(NULL), has_B2L(false),
neighbors(_neighbors), B2L_maps(_B2L_maps), L2H_maps(_L2H_maps), B2L_rings(_B2L_rings),
halo_rows_ref_count(0), halo_btl_ref_count(0), halo_ranges(_halo_ranges), halo_ranges_h(_halo_ranges_h), part_offsets(_part_offsets), part_offsets_h(_part_offsets_h), halo_rows(NULL), halo_btl(NULL), m_is_root_partition(false), m_is_glued(false), m_is_fine_level_glued(false), m_is_fine_level_consolidated(false), m_is_fine_level_root_partition(false), m_use_cuda_ipc_consolidation(false), m_fixed_view_size(false)
{
hipEventCreate(&comm_event);
// Separate non-blocking streams for interior and boundary work so the two
// can overlap with communication.
hipStreamCreateWithFlags(&m_int_stream, hipStreamNonBlocking);
hipStreamCreateWithFlags(&m_bdy_stream, hipStreamNonBlocking);
this->createComms(A->getResources());
int my_id = this->getComms()->get_global_id();
int num_parts = this->getComms()->get_num_partitions();
this->set_global_id(my_id);
this->set_num_partitions(num_parts);
}
// Generate this rank's block of a distributed 7-point Poisson matrix on an
// nx*ny*nz local grid, with ranks arranged in a P*Q*R grid. Derives this
// rank's (p,q,r) coordinate, sizes the matrix (subtracting global-boundary
// faces, adding halo columns), launches the row-length and fill kernels, and
// sets uniform partition offsets. Requires nx >= P, ny >= Q, nz >= R.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::generatePoisson7pt(int nx, int ny, int nz, int P, int Q, int R)
{
int my_id = this->getComms()->get_global_id();
int p, q, r;
if (nx < P || ny < Q || nz < R)
{
FatalError("(nx < P) or (ny < Q) or (nz < R) not supported\n", AMGX_ERR_NOT_IMPLEMENTED);
}
/* compute p,q,r from P,Q,R and myid */
p = my_id % P; // Position in x direction
q = (( my_id - p) / P) % Q; // Position in y
r = ( my_id - p - P * q) / ( P * Q ); // Position in z
// Create A.row_indices, A.col_indices, A.values, A.diag
int num_rows = nx * ny * nz;
int num_nonzeros = num_rows * 7; // Ignoring any boundary, 7 nnz per row
// Each face on the global domain boundary removes one entry per face point.
int num_substract = 0;
if (p == 0) { num_substract += ny * nz; }
if (p == P - 1) { num_substract += ny * nz; }
if (q == 0) { num_substract += nx * nz; }
if (q == Q - 1) { num_substract += nx * nz; }
if (r == 0) { num_substract += nx * ny; }
if (r == R - 1) { num_substract += nx * ny; }
num_nonzeros -= num_substract;
// Halo nodes: all six faces minus those on the global boundary.
int num_halo_nodes = 2 * (ny * nz + nx * nz + nx * ny) - num_substract;
this->local_to_global_map.resize(num_halo_nodes);
this->A->set_initialized(0);
this->A->resize(0, 0, 0, 1, 1, 1);
this->A->addProps(CSR);
this->A->resize(num_rows, num_rows + num_halo_nodes, num_nonzeros, 1, 1, 1);
const int cta_size = 128;
const int grid_size = ::min( 4096, (num_rows + cta_size - 1) / cta_size );
// Per-row lengths, then exclusive scan to produce CSR row offsets.
hipLaunchKernelGGL(( poisson7pt_count_row_len) , dim3(grid_size), dim3(cta_size), 0, 0, this->A->row_offsets.raw(), nx, ny, nz, p, q, r, P, Q, R, num_rows);
thrust_wrapper::exclusive_scan(this->A->row_offsets.begin(), this->A->row_offsets.end(), this->A->row_offsets.begin());
cudaCheckError();
// Now set nonzeros columns and values
// TODO: vectorize this
const int grid_size2 = ::min( 4096, (num_rows + cta_size - 1) / cta_size );
hipLaunchKernelGGL(( poisson7pt_set_col_values) , dim3(grid_size2), dim3(cta_size), 0, 0,
this->A->row_offsets.raw(),
this->A->col_indices.raw(),
this->A->values.raw(),
this->A->diag.raw(),
this->local_to_global_map.raw(),
nx, ny, nz,
p, q, r,
P, Q, R,
num_rows);
cudaCheckError();
// fill parts_offsets_h
// All ranks have same number of nodes
int num_ranks = P * Q * R;
this->part_offsets_h.resize(num_ranks + 1);
this->part_offsets_h[0] = (int64_t) 0;
for (int i = 1; i < num_ranks + 1; i++)
{
this->part_offsets_h[i] = this->part_offsets_h[i - 1] + (int64_t) num_rows;
}
// Device to host copy
this->part_offsets = this->part_offsets_h;
this->num_rows_global = P * Q * R * nx * ny * nz;
// this->A->set_initialized(1);
}
// Mirror the caller-supplied partition offsets into the host-side array,
// push a copy to the device, and record the global row count.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_SetOffsets(
    int num_ranks, int num_rows_global, const t_colIndex* partition_offsets)
{
    // num_ranks + 1 entries: offsets[i]..offsets[i+1] is rank i's row range.
    this->part_offsets_h.resize(num_ranks + 1);
    for (int i = 0; i < num_ranks + 1; i++)
    {
        this->part_offsets_h[i] = partition_offsets[i];
    }
    this->part_offsets = this->part_offsets_h; // host -> device upload
    this->num_rows_global = num_rows_global;
    cudaCheckError();
}
// Deduplicate the off-diagonal (remote) global column indices and assign each
// distinct one a local halo id starting at num_rows. Returns the host-side
// global->local map; the local->global array is uploaded to the device in a
// single copy. off_diag_cols is sorted in place as a side effect.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
map<t_colIndex, int> DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_LocalToGlobal(int num_rows, I64Vector_h &off_diag_cols)
{
    // Sort so duplicates become adjacent and can be skipped in one pass.
    thrust::sort(off_diag_cols.begin(), off_diag_cols.end());
    I64Vector_h local_to_global_h;
    map<t_colIndex, int> global_to_local; // temporary host-side lookup
    for (int i = 0; i < off_diag_cols.size(); i++)
    {
        // Keep the first occurrence of every distinct global column.
        if (i == 0 || off_diag_cols[i] != off_diag_cols[i - 1])
        {
            global_to_local[off_diag_cols[i]] = num_rows + local_to_global_h.size();
            local_to_global_h.push_back(off_diag_cols[i]);
        }
    }
    // Upload the finished mapping in one piece (fewer thrust calls).
    this->local_to_global_map.resize(local_to_global_h.size());
    thrust::copy(local_to_global_h.begin(), local_to_global_h.end(), this->local_to_global_map.begin());
    return global_to_local;
}
// Initialize the local matrix A from CSR inputs that have already been
// renumbered to local column indices. The column space is the owned rows
// plus the halo columns recorded in local_to_global_map. If `diag` is
// non-NULL the external diagonal is copied in; otherwise it is computed.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_InitLocalMatrix(
    IVector_h local_col_indices,
    int num_rows,
    int num_nonzeros,
    const int block_dimx,
    const int block_dimy,
    const int *row_offsets,
    const mat_value_type *values,
    const void *diag)
{
    // init local matrix: reset to an empty matrix, then set properties
    // before the final resize
    this->A->set_initialized(0);
    this->A->resize(0, 0, 0, 1, 1, 1);
    this->A->addProps(CSR);
    if (diag)
    {
        this->A->addProps(DIAG);
    }
    // columns = owned rows + halo columns
    this->A->resize(num_rows, num_rows + this->local_to_global_map.size(), num_nonzeros, block_dimx, block_dimy, 1);
    cudaCheckError();
    // set local matrix
    thrust::copy(row_offsets, row_offsets + num_rows + 1, this->A->row_offsets.begin());
    this->A->col_indices = local_col_indices;
    thrust::copy(values, values + num_nonzeros * block_dimx * block_dimy, this->A->values.begin());
    cudaCheckError();
    // setup diagonal
    if (diag)
    {
        // copy the externally-supplied diagonal block into the DIAG region;
        // hipMemcpyDefault lets `diag` be either host or device memory
        hipMemcpy(this->A->values.raw() + this->A->diagOffset()*this->A->get_block_size(), diag, sizeof(mat_value_type) * num_rows * block_dimx * block_dimy, hipMemcpyDefault);
    }
    else
    {
        this->A->computeDiagonal();
    }
    cudaCheckError();
}
// Load a distributed matrix described by a global partition vector
// (partitionVec[global_row] == owning rank). Builds the per-rank offsets,
// the global<->local renumbering, and finally the local matrix
// (owned rows + halo columns). If `partition` is NULL, contiguous row
// ranges are assigned to ranks based on gathered per-rank row counts.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrixPartitionVec(
    int num_rows,
    int num_nonzeros,
    const int block_dimx,
    const int block_dimy,
    const int *row_offsets,
    const t_colIndex *col_indices,
    const mat_value_type *values,
    int num_ranks,
    int num_rows_global,
    const void *diag,
    const int *partition)
{
    // fetch my rank
    int my_id = this->getComms()->get_global_id();
    // setup partition vector
    IVector_h partitionVec(num_rows_global);
    if (partition == NULL)
    {
        // no partition info given: gather per-rank row counts and assign
        // contiguous global row ranges to ranks in rank order
        // (assumes sum(rowCounts) == num_rows_global — TODO confirm at call sites)
        IVector_h rowCounts(num_ranks);
        this->getComms()->all_gather(num_rows, rowCounts, 1);
        int p = 0;
        for (int i = 0; i < num_ranks; ++i)
        {
            for (int j = 0; j < rowCounts[i]; ++j)
            {
                partitionVec[p++] = i;
            }
        }
    }
    else
    {
        // use existing partition info
        for (int i = 0; i < num_rows_global; i++)
        {
            partitionVec[i] = partition[i];
        }
    }
    // compute partition offsets (based on number of elements per partition). Will be modified when calculating partition map.
    t_colIndex *partition_offsets = (t_colIndex *)calloc(num_ranks + 1, sizeof(t_colIndex));
    for (int i = 0; i < num_rows_global; i++)
    {
        int pvi = partitionVec[i];
        partition_offsets[pvi + 1]++;
    }
    thrust::inclusive_scan(partition_offsets, partition_offsets + num_ranks + 1, partition_offsets);
    loadDistributed_SetOffsets(num_ranks, num_rows_global, partition_offsets);
    // compute partition map (which tells you how the global elements are mapped into the partitions)
    t_colIndex *partition_map = (t_colIndex *)calloc(num_rows_global, sizeof(t_colIndex));
    for (int i = 0; i < num_rows_global; i++)
    {
        int pvi = partitionVec[i];
        t_colIndex poi = partition_offsets[pvi];
        partition_map[poi] = i;
        // offsets are consumed as per-rank insertion cursors here, which is
        // why they were freed and not reused below
        partition_offsets[pvi]++;
    }
    free(partition_offsets);
    // compute the inverse partition map
    t_colIndex *ipartition_map = (t_colIndex *)calloc(num_rows_global, sizeof(t_colIndex));
    for (int i = 0; i < num_rows_global; i++)
    {
        ipartition_map[partition_map[i]] = i;
    }
    free(partition_map);
    int h_cidx_allocated = 0;
    // make sure the global column indices are readable from the host
    // (stages device data through the pinned buffer if needed)
    const t_colIndex *h_col_indices_global = (const t_colIndex *)this->getHostPointerForData(col_indices, num_nonzeros * sizeof(t_colIndex), &h_cidx_allocated);
    // gather all off-diag columns
    I64Vector_h off_diag_cols;
    for (int i = 0; i < num_nonzeros; i++)
    {
        if (partitionVec[h_col_indices_global[i]] != my_id)
        {
            off_diag_cols.push_back(ipartition_map[h_col_indices_global[i]]);
        }
    }
    auto global_to_local = loadDistributed_LocalToGlobal<t_colIndex>(num_rows, off_diag_cols);
    // set 1, then scan to compute local row indices
    IVector_h my_indices(num_rows_global);
    for (int i = 0; i < num_nonzeros; i++)
    {
        if (partitionVec[h_col_indices_global[i]] == my_id) // find my local columns and set to 1
        {
            my_indices[ipartition_map[h_col_indices_global[i]]] = 1;
        }
    }
    thrust::exclusive_scan(my_indices.begin(), my_indices.end(), my_indices.begin());
    // remap columns to local
    IVector_h local_col_indices(num_nonzeros);
    for (int i = 0; i < num_nonzeros; i++)
    {
        if (partitionVec[h_col_indices_global[i]] != my_id)
        {
            // off-diag
            local_col_indices[i] = global_to_local[ipartition_map[h_col_indices_global[i]]];
        }
        else
        {
            // diag
            local_col_indices[i] = my_indices[ipartition_map[h_col_indices_global[i]]];
        }
    }
    free(ipartition_map);
    loadDistributed_InitLocalMatrix(local_col_indices, num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, values, diag);
    cudaCheckError();
    // don't free possibly allocated pinned buffer, since it could be used later. if it would not - it would be deallocated automatically
    /*if (h_cidx_allocated)
    {
        free((void*)h_col_indices_global);
    }*/
}
// Load a distributed matrix described by per-rank partition offsets:
// rank r owns global rows [partition_offsets[r], partition_offsets[r+1]).
// Compared to the partition-vector path, columns can be classified with a
// simple range test, and the scratch scan array only needs diagonal_size
// entries instead of num_rows_global.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrixPartitionOffsets(
    int num_rows,
    int num_nonzeros,
    const int block_dimx,
    const int block_dimy,
    const int *row_offsets,
    const t_colIndex *col_indices,
    const mat_value_type *values,
    int num_ranks,
    int num_rows_global,
    const void *diag,
    const t_colIndex *partition_offsets)
{
    // fetch my rank
    int my_id = this->getComms()->get_global_id();
    // sanity check, cheap to perform, and helps prevent harder-to-debug errors later on
    if (!std::is_sorted(partition_offsets, partition_offsets + num_ranks + 1)) {
        FatalError("Partition offsets are not sorted.", AMGX_ERR_BAD_PARAMETERS);
    }
    loadDistributed_SetOffsets(num_ranks, num_rows_global, partition_offsets);
    // Create predicate to determine if a column is in the local diagonal block
    t_colIndex my_first_col = this->part_offsets_h[my_id];
    t_colIndex one_past_my_last_col = this->part_offsets_h[my_id + 1];
    auto in_local_diagonal_block = [my_first_col, one_past_my_last_col](const t_colIndex col_index) {
        return col_index >= my_first_col && col_index < one_past_my_last_col;
    };
    int h_cidx_allocated = 0;
    // make sure the global column indices are readable from the host
    const t_colIndex *h_col_indices_global = (const t_colIndex *)this->getHostPointerForData(col_indices, num_nonzeros * sizeof(t_colIndex), &h_cidx_allocated);
    // gather all off-diag columns
    I64Vector_h off_diag_cols;
    for (int i = 0; i < num_nonzeros; i++)
    {
        if (!in_local_diagonal_block(h_col_indices_global[i]))
        {
            off_diag_cols.push_back(h_col_indices_global[i]);
        }
    }
    auto global_to_local = loadDistributed_LocalToGlobal<t_colIndex>(num_rows, off_diag_cols);
    // set 1, then scan to compute local row indices
    // "coordinate-shift" columns so they lie in much smaller range of my diagonal indices
    int diagonal_size = this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id];
    IVector_h my_indices(diagonal_size);
    for (int i = 0; i < num_nonzeros; i++)
    {
        t_colIndex col_index = h_col_indices_global[i];
        if (in_local_diagonal_block(h_col_indices_global[i])) // find my local columns and set to 1
        {
            // Columns that are on *my* diag partition cannot be indexed from 0..num_rows_global;
            // instead, part_offsets_h[my_id] <= col_index < part_offsets[my_id+1]
            col_index -= this->part_offsets_h[my_id];
            my_indices[col_index] = 1;
        }
    }
    thrust::exclusive_scan(my_indices.begin(), my_indices.end(), my_indices.begin());
    // remap columns to local
    IVector_h local_col_indices(num_nonzeros);
    for (int i = 0; i < num_nonzeros; i++)
    {
        t_colIndex col_index = h_col_indices_global[i];
        if (!in_local_diagonal_block(col_index))
        {
            // off-diag
            local_col_indices[i] = global_to_local[col_index];
        }
        else
        {
            // diag: shift into [0, diagonal_size) and look up the scanned index
            col_index -= this->part_offsets_h[my_id];
            local_col_indices[i] = my_indices[col_index];
        }
    }
    loadDistributed_InitLocalMatrix(local_col_indices, num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, values, diag);
}
// Entry point for loading a distributed matrix: dispatches to the
// partition-vector or partition-offsets implementation depending on how
// the caller described the partitioning in `dist`.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrix(
    int num_rows,
    int num_nonzeros,
    const int block_dimx,
    const int block_dimy,
    const int *row_offsets,
    const t_colIndex *col_indices,
    const mat_value_type *values,
    int num_ranks,
    int num_rows_global,
    const void *diag,
    const MatrixDistribution &dist)
{
    using PI = MatrixDistribution::PartitionInformation;
    const PI style = dist.getPartitionInformationStyle();
    if (style == PI::PartitionVec)
    {
        // partition data is a per-global-row owner vector
        loadDistributedMatrixPartitionVec(num_rows, num_nonzeros, block_dimx, block_dimy,
            row_offsets, col_indices, values, num_ranks, num_rows_global, diag, (const int*) dist.getPartitionData());
    }
    else if (style == PI::PartitionOffsets)
    {
        // partition data is a sorted array of num_ranks+1 row offsets
        loadDistributedMatrixPartitionOffsets(num_rows, num_nonzeros, block_dimx, block_dimy,
            row_offsets, col_indices, values, num_ranks, num_rows_global, diag, (const t_colIndex*) dist.getPartitionData());
    }
    else
    {
        FatalError("Unsupported partitioning data format used with loadDistributedMatrix", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Host specialization: distributed classical AMG is device-only, so this
// always aborts with a not-implemented error.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::renumberMatrixOneRing(int update_neighbours)
{
    FatalError("Distributed classical AMG not implemented on host", AMGX_ERR_NOT_IMPLEMENTED);
}
// Set up 1-ring communication structures for the local matrix and renumber
// it: builds/updates the neighbor list, creates B2L/L2H maps, halo offsets
// and B2L rings, reorders the owned rows, and finally rebuilds
// local_to_global_map via a halo exchange of the new global indices.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::renumberMatrixOneRing(int update_neighbours)
{
    // Step 1: Using halo_ranges, flag neighbors and at the same time, flag halo_nodes (flag_halo_nodes_local)
    int my_id = this->global_id();
    int num_parts = this->get_num_partitions();  // NOTE(review): appears unused in this function — confirm before removing
    this->set_base_index(this->part_offsets_h[my_id]);
    this->set_index_range(this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]);
    DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
    // Create/update list of neighbors
    if (update_neighbours)
    {
        typedef typename TConfig::template setVecPrec<AMGX_vecInt64>::Type i64vec_value_type;
        typedef Vector<i64vec_value_type> I64Vector;
        typedef typename Matrix<TConfig>::MVector MVector;
        std::vector<IVector> halo_row_offsets(this->neighbors.size());
        std::vector<I64Vector> halo_global_indices(this->neighbors.size());
        std::vector<MVector> halo_values(this->neighbors.size());
        prep->create_halo_rows_global_indices(*(this->A), halo_row_offsets, halo_global_indices, halo_values);
        prep->update_neighbors_list(*(this->A), this->neighbors, this->halo_ranges_h, this->halo_ranges, this->part_offsets_h, this->part_offsets, halo_row_offsets, halo_global_indices);
    }
    else
    {
        prep->create_neighbors_v2(*(this->A));
    }
    this->getComms()->set_neighbors(this->neighbors.size());
    // Create B2L_maps and L2H_maps
    prep->create_boundary_lists_v3(*(this->A));
    // halo_offsets: halo rows for each neighbor are appended after the owned rows
    int neighbors = this->A->manager->num_neighbors();
    int A_num_rows, offset;
    this->A->getOffsetAndSizeForView(OWNED, &offset, &A_num_rows);
    this->halo_offsets.resize(neighbors + 1, 0);
    this->halo_offsets[0] = A_num_rows;
    for (int i = 0; i < neighbors; i++)
    {
        this->halo_offsets[i + 1] = this->halo_offsets[i] + this->B2L_maps[i].size();
    }
    this->getComms()->exchange_vectors(this->A->manager->B2L_maps, *(this->A), 0);
    // Initialize B2L_rings (single ring: [0, map size) for every neighbor)
    int num_neighbors = this->neighbors.size();
    this->B2L_rings.resize(num_neighbors);
    for (int i = 0; i < num_neighbors; i++)
    {
        this->B2L_rings[i].resize(2);
        this->B2L_rings[i][0] = 0;
        this->B2L_rings[i][1] = this->B2L_maps[i].size();
    }
    prep->initialize_B2L_maps_offsets(*(this->A), 1);
    delete prep;
    //Use the exchanged halo row matrices and the boundary/halo index lists to renumber and consolidate the matrix
    // Step 5: renumber all owned rows and columns
    this->reorder_matrix_owned();
    // Step 6: renumber local_to_global_map
    int num_owned_rows = this->A->manager->halo_offsets[0];
    int size_one_ring;
    this->A->getOffsetAndSizeForView(FULL, &offset, &size_one_ring);
    I64Vector_d global_col_indices(size_one_ring);
    // owned rows get consecutive global indices starting at base_index();
    // the halo entries are then filled in by the exchange below
    thrust::sequence(global_col_indices.begin(), global_col_indices.begin() + num_owned_rows, this->base_index() );
    cudaCheckError();
    global_col_indices.dirtybit = 1;
    this->exchange_halo(global_col_indices, global_col_indices.tag);
    thrust_wrapper::copy(global_col_indices.begin() + num_owned_rows, global_col_indices.begin() + size_one_ring, this->local_to_global_map.begin(), this->get_int_stream(), true);
    cudaCheckError();
}
// Host specialization: distributed classical AMG is device-only, so this
// always aborts with a not-implemented error.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::renumber_P_R(Matrix_h &P, Matrix_h &R, Matrix_h &A_fine)
{
    FatalError("Distributed classical AMG not implemented on host", AMGX_ERR_NOT_IMPLEMENTED);
}
// Renumber prolongation P and restriction R after the coarse matrix Ac was
// reordered: remaps the owned column indices of P, P's B2L maps, reorders
// the owned rows of R (values and column indices), and refreshes P's
// local_to_global_map by exchanging the new owned numbering with neighbors.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::renumber_P_R(Matrix_d &P, Matrix_d &R, Matrix_d &A_fine)
{
    int cta_size = 256;
    int num_owned_fine_pts = A_fine.manager->halo_offsets[0];
    int num_owned_coarse_pts, offset;
    // matrix Ac
    this->A->getOffsetAndSizeForView(OWNED, &offset, &num_owned_coarse_pts);
    // Renumber the owned col indices of P (not the halo columns, since P.manager was created assuming some other numbering)
    int nnz_owned_fine_pts = P.row_offsets[num_owned_fine_pts];
    int num_blocks_fine = min(4096, (nnz_owned_fine_pts + cta_size - 1) / cta_size);
    if (num_blocks_fine > 0)
    {
        hipLaunchKernelGGL(( renumber_P_col_indices) , dim3(num_blocks_fine), dim3(cta_size), 0, 0, P.col_indices.raw(), this->renumbering.raw(), num_owned_coarse_pts, nnz_owned_fine_pts);
        cudaCheckError();
    }
    // Renumber the B2L_maps of P (gather through the renumbering permutation)
    for (int i = 0; i < P.manager->neighbors.size(); i++)
    {
        thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), P.manager->B2L_maps[i].begin()),
                     thrust::make_permutation_iterator(this->renumbering.begin(), P.manager->B2L_maps[i].end()),
                     P.manager->B2L_maps[i].begin());
    }
    cudaCheckError();
    // Don't renumber the L2H_maps or the halo
    // Renumber the local_to_global_map of matrix P (since neighbors renumbered their owned rows)
    // Swap owned rows of R
    IVector new_row_offsets(R.row_offsets.size());
    int insert = 0;
    // Only renumber the owned rows
    int num_blocks_owned = min(4096, (num_owned_coarse_pts + cta_size - 1) / cta_size);
    if (num_blocks_owned > 0)
    {
        hipLaunchKernelGGL(( calc_rowlen_reorder) , dim3(num_blocks_owned), dim3(cta_size) , 0, 0, R.row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), num_owned_coarse_pts, insert);
        cudaCheckError();
    }
    // row lengths -> row offsets
    thrust_wrapper::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + num_owned_coarse_pts + 1, new_row_offsets.begin());
    cudaCheckError();
    // Copy the row_offsets for halo rows
    thrust::copy(R.row_offsets.begin() + num_owned_coarse_pts, R.row_offsets.end(), new_row_offsets.begin() + num_owned_coarse_pts);
    cudaCheckError();
    // Reorder the rows of R (no need to reorder the column indices)
    int new_nnz = new_row_offsets[new_row_offsets.size() - 1];
    int halo_offset = new_row_offsets[num_owned_coarse_pts];
    typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA;
    VVector new_values(new_nnz * R.get_block_size(), types::util< ValueTypeA >::get_zero());
    IVector new_col_indices(new_nnz, 0);
    int num_blocks_total = min(4096, (R.get_num_rows() + cta_size - 1) / cta_size);
    if (num_blocks_total > 0)
    {
        hipLaunchKernelGGL(( reorder_R_matrix <32>) , dim3(num_blocks_total), dim3(512), 0, 0, R.row_offsets.raw(), R.col_indices.raw(), R.values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), R.get_block_size(), R.get_num_rows(), num_owned_coarse_pts);
        cudaCheckError();
    }
    R.col_indices.swap(new_col_indices);
    R.row_offsets.swap(new_row_offsets);
    R.values.swap(new_values);
    // Renumber the local_to_global_map (since neighbors have changed their owned numbering)
    if (P.manager->neighbors.size() != 0)
    {
        int size_one_ring = P.manager->halo_offsets[P.manager->neighbors.size()];
        I64Vector_d global_col_indices(size_one_ring);
        // owned points get their new global indices; halo entries are filled
        // by the exchange below
        thrust::sequence(global_col_indices.begin(), global_col_indices.begin() + num_owned_coarse_pts, this->base_index());
        cudaCheckError();
        global_col_indices.dirtybit = 1;
        P.manager->exchange_halo(global_col_indices, global_col_indices.tag);
        thrust_wrapper::copy(global_col_indices.begin() + num_owned_coarse_pts, global_col_indices.begin() + size_one_ring, P.manager->local_to_global_map.begin(), this->get_int_stream(), true);
        cudaCheckError();
    }
    DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
    prep->initialize_B2L_maps_offsets(P, 1);
    delete prep;
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingB2Lmaps()
{
    // Input:
    //   a matrix with N rows, whose column indices are local indices from 0 to N+M-1,
    //   where M is a number of 1-ring halo vertices.
    //   The matrix also contains array "local_to_global_map" of size M, which stores the global index of each halo index.
    //   Ex: assuming a column has index N+K, where 0 <= K < M, then its global id is local_to_global_map[K].
    //   The matrix also contains part_offsets_h and part_offsets arrays, which store where each partition begins.
    // Output:
    //   This function creates all the necessary data to do 1-ring exchanges,
    //   i.e. list of 1-ring neighbors, B2L_maps for 1-ring, halo_offsets for 1-ring.
    //   Also, the function reorders the halo indices, such that 1-ring indices are in the order
    //   of neighbors, and therefore, exchange_halo doesn't have to be changed (i.e. L2H = identity).
    // What it does:
    //   Based on the global indices of its halo vertices, count the number of neighbors.
    //   For each neighbor, receive the halo indices that will be needed by neighbor.
    //   From those, create B2L_maps[0], which contains for all neighbors.
    // This function assumes that:
    //   part_offset is defined
    //   B2L_maps
    int my_id = this->global_id();
    int num_parts = this->get_num_partitions();  // NOTE(review): appears unused in this function — confirm before removing
    this->set_base_index(this->part_offsets_h[my_id]);
    this->set_index_range(this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]);
    DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
    // This function creates the array neighbors, which contains a list of partitions to which data
    // needs to be sent and/or received
    prep->create_neighbors_v2(*(this->A));
    // Here change the manager if some partitions have no neighbors
    this->getComms()->set_neighbors(this->neighbors.size());
    prep->create_B2L_one_ring(*(this->A));
    delete prep;
}
// Input:  a matrix with 1-ring B2L_maps and 1-ring halo_offsets.
// Output: the matrix gains 1-ring rows plus 2-ring B2L_maps, 2-ring
//         halo_offsets and 2-ring neighbors.
// (For reference see create_B2L_from_maps, which calls create_rings,
//  create_halo_btl, create_halo_rows and comms->exchange_matrix_halo.)
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingHaloRows()
{
    DistributedArranger<TConfig_d> arranger;
    arranger.create_one_ring_halo_rows(*(this->A));
    // I believe this can be removed since we don't use masked SpMV anymore
    arranger.createRowsLists(*(this->A), false);
    // Matrix reordering is not necessary anymore because we don't use latency
    // hiding; if latency hiding comes back, re-enable:
    //this->reorder_matrix();
}
// Base-class constructor used in the fine-level consolidation path: binds
// the matrix, creates the interior/boundary streams and copies the neighbor
// list. Only one halo ring and a halo depth of 1 are supported here.
template <class TConfig>
inline DistributedManagerBase<TConfig>::DistributedManagerBase(
    Matrix<TConfig> &a,
    INDEX_TYPE allocated_halo_depth,
    INDEX_TYPE num_import_rings,
    int num_neighbors,
    const VecInt_t *neighbors_) : m_fine_level_comms(NULL), A(&a), m_pinned_buffer_size(0), m_pinned_buffer(NULL), _num_interior_nodes(0), _num_boundary_nodes(0), _comms(NULL), has_B2L(false), neighbors(_neighbors), halo_rows_ref_count(0), halo_rows(NULL), halo_btl_ref_count(0), halo_btl(NULL), halo_ranges(_halo_ranges), halo_ranges_h(_halo_ranges_h), part_offsets(_part_offsets), part_offsets_h(_part_offsets_h),
    B2L_maps(_B2L_maps), L2H_maps(_L2H_maps), B2L_rings(_B2L_rings), m_is_root_partition(false), m_is_glued(false), m_is_fine_level_glued(false), m_is_fine_level_consolidated(false), m_is_fine_level_root_partition(false), m_use_cuda_ipc_consolidation(false), m_fixed_view_size(false)
{
    // non-blocking streams for interior and boundary work
    hipStreamCreateWithFlags(&m_int_stream, hipStreamNonBlocking);
    hipStreamCreateWithFlags(&m_bdy_stream, hipStreamNonBlocking);
    if (num_import_rings != 1)
    {
        FatalError("num_rings > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if (allocated_halo_depth != 1)
    {
        FatalError("allocated_halo_depth > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED);
    }
    this->set_num_halo_rings(num_import_rings);
    neighbors.resize(num_neighbors);
    // neighbors_ may be host or device memory, hence hipMemcpyDefault
    hipMemcpy(neighbors.raw(), neighbors_, num_neighbors * sizeof(VecInt_t), hipMemcpyDefault);
    cudaCheckError();
}
// Cache per-neighbor boundary (B2L) and halo (L2H) index maps on the host.
// The maps arrive as flat arrays with CSR-style offset arrays: the entries
// for neighbor i live in [ptrs[i], ptrs[i+1]).
template <class TConfig>
inline void DistributedManagerBase<TConfig>::cacheMaps(const VecInt_t *b2l_maps, const VecInt_t *b2l_ptrs, const VecInt_t *l2h_maps, const VecInt_t *l2h_ptrs)
{
    const int num_neighbors = this->neighbors.size();
    this->cached_B2L_maps.resize(num_neighbors);
    this->cached_L2H_maps.resize(num_neighbors);
    for (int i = 0; i < num_neighbors; i++)
    {
        const int b2l_begin = b2l_ptrs[i];
        const int b2l_end = b2l_ptrs[i + 1];
        this->cached_B2L_maps[i].resize(b2l_end - b2l_begin);
        for (int j = b2l_begin; j < b2l_end; j++)
        {
            this->cached_B2L_maps[i][j - b2l_begin] = b2l_maps[j];
        }
        const int l2h_begin = l2h_ptrs[i];
        const int l2h_end = l2h_ptrs[i + 1];
        this->cached_L2H_maps[i].resize(l2h_end - l2h_begin);
        for (int j = l2h_begin; j < l2h_end; j++)
        {
            this->cached_L2H_maps[i][j - l2h_begin] = l2h_maps[j];
        }
    }
}
// Snapshot the current 1-ring B2L and L2H maps into the host-side cache.
template <class TConfig>
inline void DistributedManagerBase<TConfig>::cacheMapsOneRing()
{
    const int num_neighbors = this->neighbors.size();
    this->cached_B2L_maps.resize(num_neighbors);
    this->cached_L2H_maps.resize(num_neighbors);
    for (int n = 0; n < num_neighbors; ++n)
    {
        this->cached_B2L_maps[n] = this->B2L_maps[n];
        this->cached_L2H_maps[n] = this->L2H_maps[n];
    }
}
// Cache the 1-ring B2L/L2H maps supplied as per-neighbor pointer arrays.
// The size arrays and the pointer arrays themselves may live in host or
// device memory, hence the hipMemcpyDefault staging copies.
template <class TConfig>
inline void DistributedManagerBase<TConfig>::cacheMapsOneRing(const VecInt_t **b2l_maps, const VecInt_t *b2l_sizes, const VecInt_t **l2h_maps, const VecInt_t *l2h_sizes)
{
    int num_neighbors = this->neighbors.size();
    this->cached_B2L_maps.resize(num_neighbors);
    this->cached_L2H_maps.resize(num_neighbors);
    // Nothing to stage without neighbors; also avoids taking the address of
    // element 0 of empty std::vectors below (undefined behavior).
    if (num_neighbors == 0)
    {
        return;
    }
    // buffering in the case of GPU data. This shouldn't much affect performance
    std::vector<VecInt_t *> b2l_buffer, l2h_buffer;
    std::vector<VecInt_t> b2l_sizes_buffer, l2h_sizes_buffer;
    b2l_buffer.resize(num_neighbors);
    l2h_buffer.resize(num_neighbors);
    b2l_sizes_buffer.resize(num_neighbors);
    l2h_sizes_buffer.resize(num_neighbors);
    hipMemcpy(b2l_sizes_buffer.data(), b2l_sizes, sizeof(VecInt_t) * num_neighbors, hipMemcpyDefault);
    hipMemcpy(l2h_sizes_buffer.data(), l2h_sizes, sizeof(VecInt_t) * num_neighbors, hipMemcpyDefault);
    hipMemcpy(b2l_buffer.data(), b2l_maps, sizeof(VecInt_t *) * num_neighbors, hipMemcpyDefault);
    hipMemcpy(l2h_buffer.data(), l2h_maps, sizeof(VecInt_t *) * num_neighbors, hipMemcpyDefault);
    cudaCheckError();
    // caching all of the maps; skip empty maps so we never form &vec[0] on
    // an empty container
    for (int i = 0; i < num_neighbors; i++)
    {
        int size = b2l_sizes_buffer[i];
        this->cached_B2L_maps[i].resize(size);
        if (size > 0)
        {
            hipMemcpy(&(this->cached_B2L_maps[i][0]), b2l_buffer[i], sizeof(VecInt_t) * size, hipMemcpyDefault);
            cudaCheckError();
        }
        size = l2h_sizes_buffer[i];
        this->cached_L2H_maps[i].resize(size);
        if (size > 0)
        {
            hipMemcpy(&(this->cached_L2H_maps[i][0]), l2h_buffer[i], sizeof(VecInt_t) * size, hipMemcpyDefault);
            cudaCheckError();
        }
    }
}
// Bind the manager to in_A and read the fine-level consolidation switches
// from the matrix's resource configuration.
template <class TConfig>
void DistributedManagerBase<TConfig>::setAConsolidationFlags( Matrix<TConfig> &in_A)
{
    this->A = &in_A;
    AMG_Config *cfg = this->A->getResources()->getResourcesConfig();
    std::string scope;
    int consolidate = 0;
    int use_ipc = 0;
    cfg->getParameter<int>("fine_level_consolidation", consolidate, "default", scope);
    cfg->getParameter<int>("use_cuda_ipc_consolidation", use_ipc, "default", scope);
    this->m_is_fine_level_consolidated = (consolidate != 0);
    this->m_use_cuda_ipc_consolidation = (use_ipc != 0);
}
// Upload a host CSR matrix into in_A, routing through the consolidation
// path when fine-level consolidation is enabled in the configuration.
template <class TConfig>
void DistributedManagerBase<TConfig>::uploadMatrix(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A)
{
    this->setAConsolidationFlags(in_A);
    if (!this->m_is_fine_level_consolidated)
    {
        // regular path: upload and reorder on this rank only
        this->A->manager->initializeUploadReorderAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A));
    }
    else
    {
        // consolidation path: merge fine-level data across ranks first
        this->A->manager->consolidateAndUploadAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A));
    }
}
// Ensure the pinned staging buffer can hold at least `size` bytes.
// An existing-but-too-small buffer is released first; a new buffer is then
// allocated with ~10% headroom to reduce future reallocations.
template <class TConfig>
void DistributedManagerBase<TConfig>::checkPinnedBuffer(size_t size)
{
    if ((m_pinned_buffer_size < size) && (m_pinned_buffer != NULL))
    {
        hipHostFree(m_pinned_buffer);
        m_pinned_buffer = NULL;
        m_pinned_buffer_size = 0;
    }
    if (m_pinned_buffer == NULL)
    {
        m_pinned_buffer_size = (size_t)(size * 1.1);
        hipError_t rc = hipHostMalloc(&m_pinned_buffer, m_pinned_buffer_size);
        if (rc != hipSuccess)
        {
            // A silently failed allocation would leave m_pinned_buffer NULL
            // and crash the next staging copy; fail loudly instead.
            m_pinned_buffer = NULL;
            m_pinned_buffer_size = 0;
            FatalError("Could not allocate pinned host buffer.", AMGX_ERR_BAD_PARAMETERS);
        }
    }
}
// Releases the pinned staging buffer, tears down comms, destroys the
// interior/boundary streams and frees the halo structures when no
// references to them remain.
template <class TConfig>
DistributedManagerBase<TConfig>::~DistributedManagerBase()
{
    if (m_pinned_buffer != NULL)
    {
        hipHostFree(m_pinned_buffer);
    }
    destroyComms();
    // from children:
    hipStreamDestroy(this->m_int_stream);
    hipStreamDestroy(this->m_bdy_stream);
    // halo_rows/halo_btl may be shared; only delete when the ref count is zero
    if (!this->halo_rows_ref_count && this->halo_rows != NULL)
    {
        delete this->halo_rows;
        this->halo_rows = NULL;
    }
    if (!this->halo_btl_ref_count && this->halo_btl != NULL)
    {
        delete this->halo_btl;
        this->halo_btl = NULL;
    }
}
// if pointer is host pointer - returns data. If it is device pointer - copies it to the m_pinned_buffer and returns pointer to m_pinned_buffer
// `size` is in bytes; *allocated is set to 1 when the data was staged into
// the pinned buffer (so the caller knows it holds a temporary copy).
template <class TConfig>
void *DistributedManagerBase<TConfig>::getHostPointerForData(void *ptr, size_t size, int *allocated)
{
    hipError_t rc;
    hipPointerAttribute_t att;
    void *ptr_h;
    cudaCheckError();
    /* WARNING: We may accept the following types of allocation for ptr:
       1. malloc [host memory]
       2. hipMalloc [device memory]
       3. malloc + hipHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory]
       4. hipHostMalloc [pinned host memory from the beginning]
       The correct way to cover these cases is the following:
       hipPointerAttribute_t att;
       hipError_t st = hipPointerGetAttributes(&att, ptr);
       if (st == hipSuccess) {
           //you are in case 2, 3 or 4.
       }
       else{
           //you are in case 1.
       }
       The following pattern of checks should be implemented
       hipPointerAttribute_t att;
       hipError_t st = hipPointerGetAttributes(&att, ptr);
       if (st == hipSuccess) {
           //you are in case 2 or 4.
       }
       else{
           st = hipHostGetDevicePointer(ptr_on_device, ptr, 0);
           if (st == hipSuccess){
               //you are in case 3.
           }
           else{
               //you are in case 1.
           }
       }
       The above pattern will be used whenever we need to process input data.
       Obs.: parameter size is in bytes and
             parameter allocated indicates whether memory was allocated
             and needs to be release later on. */
    /*
    // original implementation
    hipPointerGetAttributes(&att, ptr);
    if (att.hostPointer == NULL)
    {
        checkPinnedBuffer(size);
        hipMemcpy(m_pinned_buffer, ptr, size, hipMemcpyDefault);
        return m_pinned_buffer;
    }
    else {
        return ptr;
    }
    */
    *allocated = 0;
    // get pointer to values on the device
    rc = hipPointerGetAttributes(&att, ptr);
    if (rc == hipSuccess)
    {
        //you are in case 2 or 4 from the above comment.
        if (att.hostPointer == NULL)
        {
            //you are in case 2: device memory — stage it through the pinned buffer
            checkPinnedBuffer(size);
            rc = hipMemcpy(m_pinned_buffer, ptr, size, hipMemcpyDefault);
            if (rc != hipSuccess)
            {
                FatalError("Could not copy into the temporary (host) storage. Try pinning the memory to avoid the hipMemcpy.", AMGX_ERR_BAD_PARAMETERS);
            }
            ptr_h = m_pinned_buffer;
            *allocated = 1;
        }
        else
        {
            //you are in case 4: already host-accessible
            ptr_h = ptr;
        }
    }
    else
    {
        //you are in case 1 or 3 from the above comment: plain host memory
        ptr_h = ptr;
    }
    hipGetLastError(); //to reset last error
    /* check for null pointers */
    if (ptr_h == NULL)
    {
        FatalError("Result of (host) allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
    }
    return ptr_h;
}
// if pointer is host pointer - returns data. If it is device pointer - copies it to the m_pinned_buffer and returns pointer to m_pinned_buffer
// const overload of getHostPointerForData; same staging behavior:
// *allocated is set to 1 when the data was copied into the pinned buffer.
template <class TConfig>
const void *DistributedManagerBase<TConfig>::getHostPointerForData(const void *ptr, size_t size, int *allocated)
{
    hipError_t rc;
    hipPointerAttribute_t att;
    void *ptr_h;
    cudaCheckError();
    /* WARNING: We may accept the following types of allocation for ptr:
       1. malloc [host memory]
       2. hipMalloc [device memory]
       3. malloc + hipHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory]
       4. hipHostMalloc [pinned host memory from the beginning]
       The correct way to cover these cases is the following:
       hipPointerAttribute_t att;
       hipError_t st = hipPointerGetAttributes(&att, ptr);
       if (st == hipSuccess) {
           //you are in case 2, 3 or 4.
       }
       else{
           //you are in case 1.
       }
       The following pattern of checks should be implemented
       hipPointerAttribute_t att;
       hipError_t st = hipPointerGetAttributes(&att, ptr);
       if (st == hipSuccess) {
           //you are in case 2 or 4.
       }
       else{
           st = hipHostGetDevicePointer(ptr_on_device, ptr, 0);
           if (st == hipSuccess){
               //you are in case 3.
           }
           else{
               //you are in case 1.
           }
       }
       The above pattern will be used whenever we need to process input data.
       Obs.: parameter size is in bytes and
             parameter allocated indicates whether memory was allocated
             and needs to be release later on. */
    *allocated = 0;
    // get pointer to values on the device
    rc = hipPointerGetAttributes(&att, ptr);
    if (rc == hipSuccess)
    {
        //you are in case 2 or 4 from the above comment.
        if (att.hostPointer == NULL)
        {
            //you are in case 2: device memory — stage it through the pinned buffer
            checkPinnedBuffer(size);
            rc = hipMemcpy(m_pinned_buffer, ptr, size, hipMemcpyDefault);
            if (rc != hipSuccess)
            {
                FatalError("Could not copy into the temporary (host) storage. Try pinning the memory to avoid the hipMemcpy.", AMGX_ERR_BAD_PARAMETERS);
            }
            ptr_h = m_pinned_buffer;
            *allocated = 1;
            hipGetLastError(); //to reset last error
            return ptr_h;
        }
        else
        {
            //you are in case 4: already host-accessible
            hipGetLastError(); //to reset last error
            return ptr;
        }
    }
    else
    {
        hipGetLastError(); //to reset last error
        //you are in case 1 or 3 from the above comment: plain host memory
        return ptr;
    }
}
// Returns a device-accessible pointer for `ptr`. Device and pinned host
// memory are returned via their device alias; plain host memory is copied
// into a fresh device allocation and *allocated is set to 1 so the caller
// knows it must free the temporary later. `size` is in bytes.
template <class TConfig>
void *DistributedManagerBase<TConfig>::getDevicePointerForData(void *ptr, size_t size, int *allocated)
{
    hipError_t rc;
    hipPointerAttribute_t att;
    void *ptr_d;
    cudaCheckError();
    /* WARNING: We may accept the following types of allocation for ptr:
       1. malloc [host memory]
       2. hipMalloc [device memory]
       3. malloc + hipHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory]
       4. hipHostMalloc [pinned host memory from the beginning]
       The correct way to cover these cases is the following:
       hipPointerAttribute_t att;
       hipError_t st = hipPointerGetAttributes(&att, ptr);
       if (st == hipSuccess) {
           //you are in case 2, 3 or 4.
       }
       else{
           //you are in case 1.
       }
       The following pattern of checks should be implemented
       hipPointerAttribute_t att;
       hipError_t st = hipPointerGetAttributes(&att, ptr);
       if (st == hipSuccess) {
           //you are in case 2 or 4.
       }
       else{
           st = hipHostGetDevicePointer(ptr_on_device, ptr, 0);
           if (st == hipSuccess){
               //you are in case 3.
           }
           else{
               //you are in case 1.
           }
       }
       The above pattern will be used whenever we need to process input data.
       Obs.: parameter size is in bytes and
             parameter allocated indicates whether memory was allocated
             and needs to be release later on. */
    *allocated = 0;
    // get pointer to values on the device
    rc = hipPointerGetAttributes(&att, ptr);
    if (rc == hipSuccess)
    {
        //you are in case 2 or 4 from the above comment: use the device alias
        ptr_d = (void *)att.devicePointer;
    }
    else
    {
        //you are in case 1 or 3 from the above comment
        rc = hipHostGetDevicePointer(&ptr_d, ptr, 0);
        if (rc != hipSuccess)
        {
            //you are in case 1: plain host memory — allocate and copy
            rc = hipMalloc(&ptr_d, size);
            if (rc != hipSuccess)
            {
                FatalError("Could not allocate required temporary storage. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
            }
            rc = hipMemcpy(ptr_d, ptr, size, hipMemcpyDefault);
            if (rc != hipSuccess)
            {
                FatalError("Could not copy into the temporary storage. Try pinning the memory to avoid the hipMemcpy.", AMGX_ERR_BAD_PARAMETERS);
            }
            *allocated = 1;
        }
    }
    /* check for null pointers */
    if (ptr_d == NULL)
    {
        FatalError("Result of allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
    }
    hipGetLastError(); //to reset last error
    return ptr_d;
}
/* Const overload of getDevicePointerForData: resolve a device-dereferenceable
 * pointer for the read-only buffer 'ptr' of 'size' bytes.
 * Allocation cases (see the non-const overload for the full discussion):
 *   1. malloc                    -> stage a copy through a temporary device buffer
 *   2. hipMalloc                 -> return the runtime's device alias as-is
 *   3. malloc + hipHostRegister  -> return the mapped device alias
 *   4. hipHostMalloc             -> return the runtime's device alias as-is
 * '*allocated' is set to 1 only in case 1, telling the caller the returned
 * buffer must be freed later. */
template <class TConfig>
const void *DistributedManagerBase<TConfig>::getDevicePointerForData(const void *ptr, size_t size, int *allocated)
{
    hipError_t rc;
    hipPointerAttribute_t att;
    void *ptr_d = NULL;
    cudaCheckError();
    *allocated = 0;
    // get pointer to values on the device
    rc = hipPointerGetAttributes(&att, ptr);
    if (rc == hipSuccess)
    {
        //you are in case 2 or 4 from the above comment.
        hipGetLastError(); //to reset last error
        return (const void *)att.devicePointer;
    }
    //you are in case 1 or 3 from the above comment
    rc = hipHostGetDevicePointer(&ptr_d, (void *)ptr, 0);
    if (rc != hipSuccess)
    {
        //you are in case 1: plain host memory, stage a device copy
        rc = hipMalloc(&ptr_d, size);
        if (rc != hipSuccess)
        {
            FatalError("Could not allocate required temporary storage. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
        }
        rc = hipMemcpy(ptr_d, ptr, size, hipMemcpyDefault);
        if (rc != hipSuccess)
        {
            FatalError("Could not copy into the temporary storage. Try pinning the memory to avoid the hipMemcpy.", AMGX_ERR_BAD_PARAMETERS);
        }
        *allocated = 1;
    }
    /* check for null pointers (covers both case 1 and case 3) */
    if (ptr_d == NULL)
    {
        FatalError("Result of allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
    }
    // BUG FIX: the registered-host-memory path (case 3, where
    // hipHostGetDevicePointer succeeds) previously fell through to a
    // 'return NULL', handing callers a null device pointer. Return the
    // resolved pointer instead, matching the non-const overload.
    hipGetLastError(); //to reset last error
    return (const void *)ptr_d;
}
/* Size the matrix to n x n with nnz blocks, then copy the full CSR structure
 * and values from the caller's buffers (hipMemcpyDefault lets the runtime
 * determine the transfer direction). If 'diag' is non-null it is copied into
 * the matrix's diagonal slot; otherwise the diagonal is computed. */
template <class TConfig>
void initializeMatrixCopyAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> *A)
{
    typedef typename TConfig::MatPrec mat_value_type;
    const int block_vals = block_dimx * block_dimy;
    A->resize(n, n, nnz, block_dimx, block_dimy);
    // Row offsets and column indices of the CSR structure.
    hipMemcpy(A->row_offsets.raw(), row_ptrs, (n + 1) * sizeof(int), hipMemcpyDefault);
    cudaCheckError();
    hipMemcpy(A->col_indices.raw(), col_indices, nnz * sizeof(int), hipMemcpyDefault);
    cudaCheckError();
    // All nonzero block values.
    hipMemcpy(A->values.raw(), (const mat_value_type *)data, (nnz * block_vals) * sizeof(mat_value_type), hipMemcpyDefault);
    cudaCheckError();
    if (diag)
    {
        // Externally supplied diagonal blocks.
        hipMemcpy(A->values.raw() + A->diagOffset() * A->get_block_size(), (const mat_value_type *)diag, (n * block_vals) * sizeof(mat_value_type), hipMemcpyDefault);
    }
    else
    {
        // No external diagonal supplied; let the matrix compute it.
        A->computeDiagonal();
    }
    cudaCheckError();
}
/* Promote the cached B2L/L2H maps to the working containers, then rebuild the
 * communication structures (further halo rings, halo row matrices) and
 * renumber/consolidate the matrix accordingly. */
template <class TConfig>
void DistributedManagerBase<TConfig>::updateMapsReorder()
{
    int global_id = this->getComms()->get_global_id();
    DistributedComms<TConfig> *active_comms = this->getComms();
    DistributedComms<TConfig> **comms_handle = &active_comms;
    // Copy the cached maps into their final place.
    const int n_neighbors = this->neighbors.size();
    B2L_maps.resize(n_neighbors);
    L2H_maps.resize(n_neighbors);
    for (int nbr = 0; nbr < n_neighbors; nbr++)
    {
        B2L_maps[nbr] = this->cached_B2L_maps[nbr];
        L2H_maps[nbr] = this->cached_L2H_maps[nbr];
    }
    // DistributedArranger maps further halo rings and constructs/exchanges
    // halo row matrices (if halo_coloring != LAST).
    DistributedArranger<TConfig> *prep = new DistributedArranger<TConfig>;
    prep->create_B2L_from_maps( (*(this->A)), global_id, this->num_halo_rings(), neighbors,
                                B2L_maps, L2H_maps, B2L_rings, comms_handle, &halo_rows, &halo_btl);
    DistributedManagerBaseInit(global_id, 0, this->A->get_num_rows(), *(this->A), comms_handle, NULL, NULL);
    // Use the exchanged halo rows and boundary/halo index lists to renumber
    // and consolidate the matrix.
    this->reorder_matrix();
    prep->initialize_B2L_maps_offsets(*(this->A), this->num_halo_rings());
    delete prep;
}
// Upload a complete matrix (CSR arrays + optional external diagonal) into
// 'in_A', then rebuild the communication maps and reorder the matrix.
template <class TConfig>
void DistributedManagerBase<TConfig>::initializeUploadReorderAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A)
{
this->A = &in_A;
initializeMatrixCopyAll<TConfig>(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, this->A);
this->updateMapsReorder();
}
/* Drop this manager's reference on both communicators and delete each one
 * when its last reference is released. */
template <class TConfig>
void DistributedManagerBase<TConfig>::destroyComms()
{
    // Short-circuit && preserves the original nested-if semantics:
    // decr_ref_count() is only invoked on a non-NULL communicator.
    if (this->_comms != NULL && this->_comms->decr_ref_count())
    {
        delete (this->_comms);
        this->_comms = NULL;
    }
    if (this->m_fine_level_comms != NULL && this->m_fine_level_comms->decr_ref_count())
    {
        delete (this->m_fine_level_comms);
        this->m_fine_level_comms = NULL;
    }
}
/* Create the communicator for this manager and cache this rank's global id
 * and the total partition count. */
template <class TConfig>
void DistributedManagerBase<TConfig>::initComms(Resources *rsrc)
{
    this->createComms(rsrc);
    DistributedComms<TConfig> *comms = this->getComms();
    this->set_global_id(comms->get_global_id());
    this->set_num_partitions(comms->get_num_partitions());
}
/* Instantiate the distributed communicator selected by the "communicator"
 * configuration option. In builds without AMGX_WITH_MPI this is a no-op. */
template <class TConfig>
void DistributedManagerBase<TConfig>::createComms(Resources *rsrc)
{
#ifdef AMGX_WITH_MPI
    destroyComms();
    if (rsrc == NULL)
    {
        FatalError("Resources should not be NULL", AMGX_ERR_INTERNAL);
    }
    MPI_Comm *mpi_comm = rsrc->getMpiComm();
    AMG_Config *cfg = rsrc->getResourcesConfig();
    std::string comm_type, comm_scope;
    cfg->getParameter<std::string>("communicator", comm_type, "default", comm_scope);
    int rank = -1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank); // NOTE(review): rank is queried but unused here
    if (comm_type == "MPI_DIRECT")
    {
        // CUDA-aware MPI: device buffers passed directly to MPI calls.
        _comms = new CommsMPIDirect<TConfig>(*cfg, comm_scope, mpi_comm);
        std::string comm_log("Using CUDA-Aware MPI (GPU Direct) communicator...\n");
        amgx_distributed_output(comm_log.c_str(), comm_log.length());
    }
    else if (comm_type == "MPI")
    {
        // Plain MPI: data is staged through host buffers.
        _comms = new CommsMPIHostBufferStream<TConfig>(*cfg, comm_scope, mpi_comm);
        std::string comm_log("Using Normal MPI (Hostbuffer) communicator...\n");
        amgx_distributed_output(comm_log.c_str(), comm_log.length());
    }
    else
    {
        FatalError("Bad communicator value", AMGX_ERR_BAD_PARAMETERS);
    }
#endif
}
/* Export copies of the boundary-to-local (B2L) and local-to-halo (L2H) maps
 * into caller-owned, malloc'ed host arrays.
 *   *b2l_maps_e / *l2h_maps_e             : per-neighbor index arrays
 *   *b2l_maps_sizes_e / *l2h_maps_sizes_e : per-neighbor map lengths
 * For a neighbor whose L2H map is empty, no per-neighbor L2H buffer is
 * allocated (its size entry is 0) - unchanged from the original contract.
 * Improvement: malloc results are now checked before use instead of being
 * dereferenced unconditionally. */
template <class TConfig>
void DistributedManagerBase<TConfig>::malloc_export_maps(VecInt_t ***b2l_maps_e, VecInt_t **b2l_maps_sizes_e, VecInt_t ***l2h_maps_e, VecInt_t **l2h_maps_sizes_e)
{
    int num_neighbors = this->num_neighbors();
    *b2l_maps_e = (VecInt_t **) malloc(sizeof(VecInt_t *) * num_neighbors);
    *l2h_maps_e = (VecInt_t **) malloc(sizeof(VecInt_t *) * num_neighbors);
    *b2l_maps_sizes_e = (VecInt_t *) malloc(sizeof(VecInt_t) * num_neighbors);
    *l2h_maps_sizes_e = (VecInt_t *) malloc(sizeof(VecInt_t) * num_neighbors);
    // Fail fast on host allocation failure instead of dereferencing NULL below.
    // (malloc(0) may legitimately return NULL, hence the num_neighbors guard.)
    if (num_neighbors > 0 && (*b2l_maps_e == NULL || *l2h_maps_e == NULL || *b2l_maps_sizes_e == NULL || *l2h_maps_sizes_e == NULL))
    {
        FatalError("Could not allocate host storage for exported maps.", AMGX_ERR_BAD_PARAMETERS);
    }
    for (int i = 0; i < num_neighbors; i++)
    {
        (*b2l_maps_sizes_e)[i] = B2L_maps[i].size();
        (*l2h_maps_sizes_e)[i] = L2H_maps[i].size();
        (*b2l_maps_e)[i] = (VecInt_t *) malloc(sizeof(VecInt_t) * ( (*b2l_maps_sizes_e)[i]) );
        if ((*b2l_maps_sizes_e)[i] > 0 && (*b2l_maps_e)[i] == NULL)
        {
            FatalError("Could not allocate host storage for exported maps.", AMGX_ERR_BAD_PARAMETERS);
        }
        if (L2H_maps[i].size() != 0)
        {
            (*l2h_maps_e)[i] = (VecInt_t *) malloc(sizeof(VecInt_t) * ( (*l2h_maps_sizes_e)[i]) );
            if ((*l2h_maps_e)[i] == NULL)
            {
                FatalError("Could not allocate host storage for exported maps.", AMGX_ERR_BAD_PARAMETERS);
            }
            // Copy this neighbor's L2H map into the exported buffer.
            thrust::copy(L2H_maps[i].begin(), L2H_maps[i].end(), (*l2h_maps_e)[i]);
        }
        // Copy this neighbor's B2L map into the exported buffer.
        thrust::copy(B2L_maps[i].begin(), B2L_maps[i].end(), (*b2l_maps_e)[i]);
    }
    cudaCheckError();
}
/* Build the permutation 'renumbering' that reorders this partition's rows as
 * interior nodes first, then boundary nodes (ring 1 of each B2L map), then
 * halo nodes (L2H maps). Also updates _num_interior_nodes and
 * _num_boundary_nodes, and renumbers the entries of the B2L maps in place.
 * NOTE(review): the outer 'size' (matrix/renumbering extent) is shadowed by
 * per-loop 'int size' locals below - read carefully. */
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createRenumbering(IVector &renumbering)
{
int num_neighbors = this->neighbors.size();
// still renumber if the number of neighbors = 0, to support non-symmetric matrices
// if (num_neighbors == 0) return;
/*
EXAMPLE
Example matrix, partition 1 arrives with state:
A.row_offsets = [0 4 11 15 20]
A.col_indices = [4 0 1 2
4 5 0 1 2 3 7
0 1 2 3
1 2 3 6 7]
num_neighbors=2; neighbors = [0 2]
B2L_rings[[0 2 4][0 2 4]] B2L_maps[[0 1| 2 3][1 3| 0 2]]
L2H_maps (and halo_lists) [[4 5][6 7]]
*/
int size = 0;
if (this->L2H_maps.size())
{
// Halo columns may exceed num_rows; the max column index bounds the extent.
size = thrust_wrapper::reduce(this->A->col_indices.begin(), this->A->col_indices.end(), int(0), thrust::maximum<int>()) + 1; //Sufficient to do reduction on lth maps
cudaCheckError();
}
else
{
size = this->A->get_num_rows();
}
int rings = (this->B2L_rings.size() > 0) ? this->B2L_rings[0].size() - 1 : 0;
//initial size to size+1 so we have the total size after a scan
renumbering.resize(size + 1);
int global_size = size;
//
// Step 1 - in the main matrix, separate interior and boundary nodes (1/0 in flagArray), renumber interior ones with an exclusive scan
//
IVector flagArray(size + 1);
thrust::fill(flagArray.begin(), flagArray.begin() + size + 1, 1);
cudaCheckError();
//sets 1 for interior nodes, 0 for boundary node
for (int i = 0; i < num_neighbors; i++ )
{
int size = this->B2L_rings[i][1];
int num_blocks = min(4096, (size + 127) / 128);
if (size > 0)
{
hipLaunchKernelGGL(( remove_boundary_kernel) , dim3(num_blocks), dim3(128), 0, 0, flagArray.raw(), this->B2L_maps[i].raw(), size);
}
//If there are any L2H maps
if (this->L2H_maps.size() && this->L2H_maps[i].size())
{
int size = this->L2H_maps[i].size();
int num_blocks = min(4096, (size + 127) / 128);
hipLaunchKernelGGL(( remove_boundary_kernel) , dim3(num_blocks), dim3(128), 0, 0, flagArray.raw(), this->L2H_maps[i].raw(), size);
}
cudaCheckError();
}
//gets the renumbering of interior nodes
thrust_wrapper::exclusive_scan(flagArray.begin(), flagArray.begin() + size + 1, renumbering.begin());
cudaCheckError();
/*
EXAMPLE
After removing 1-ring boundary nodes and halo nodes from flagArray: [0 0 1 0 0 0 0 0]
After exclusive scan, which gives renumbering for interior nodes (only node #2)
renumbering: [0 0 0 1 1 1 1 1]
*/
//
// Step 2 - Renumber nodes that are in the boundary, stepping through each B2L map, and renumbering ones that have not been renumbered yet
//
//what is the biggest B2L size
INDEX_TYPE max_size = 0;
for (int i = 0; i < num_neighbors; i++)
{
max_size = max_size > this->B2L_rings[i][1] ? max_size : this->B2L_rings[i][1];
if (this->L2H_maps.size())
{
max_size = max_size > this->L2H_maps[i].size() ? max_size : this->L2H_maps[i].size();
}
}
//allocate work vectors (should be pretty small) that are used to renumber boundary nodes
IVector boundary_renum_flags(max_size);
IVector boundary_renum(max_size);
//the number of renumbered nodes so far (scan total sits at index 'size')
int max_element = renumbering[size];
this->_num_interior_nodes = max_element;
this->_num_boundary_nodes = this->A->get_num_rows() - max_element;
renumbering.resize(size);
/*
EXAMPLE
size = 8
max_size = 2, max_element = 1, num_interior_nodes=1, num_boundary_nodes = 4-1 = 3
*/
for (int i = 0; i < num_neighbors; i++)
{
//find nodes that are part of the current boundary and they haven't been renumbered yet
thrust::fill(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, 0);
int size = this->B2L_rings[i][1];
int num_blocks = min(4096, (size + 191) / 192);
if (size > 0)
hipLaunchKernelGGL(( get_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, flagArray.raw(),
this->B2L_maps[i].raw(),
boundary_renum_flags.raw(), size, global_size /*,rank*/);
//calculate the local renumbering (within this boundary region) of these nodes
thrust_wrapper::exclusive_scan(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, boundary_renum.begin());
//apply renumbering to the big numbering table
if (size > 0)
hipLaunchKernelGGL(( set_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, boundary_renum_flags.raw(),
boundary_renum.raw(),
this->B2L_maps[i].raw(),
renumbering.raw(),
size, max_element, global_size /*,rank*/);
//update the number of renumbered nodes
//(scan was over max_size entries; entries past 'size' stay 0, so the
//last scan element + last flag gives the count assigned this pass)
max_element += boundary_renum[max_size - 1] + boundary_renum_flags[max_size - 1];
/*
EXAMPLE
for neighbor 0 (ID 0)
boundary_renum_flags = [0 0], size = 2, flagArray [0 0 1 0 0 0 0 0]
get_unassigned_kernel's output: boundary_renum_flags = [1 1] flagArray [1 1 1 0 0 0 0 0]
after exclusive scan: boundary_renum [0 1]
set_unassigned_kernel updates these arrays and renumbers B2L map:
renumbering = [1 2 0 1 1 1 1 1] B2L_maps[0] = [1 2| 2 3] (note that after element 3 in renumbering and after element 2 we have invalid/not yet updated values)
max_element = 3
for neighbor 1 (ID 2)
get_unassigned_kernels's output: boundary_renum_flags [0 1] flagArray [1 1 1 1 0 0 0 0]
after exclusive scan boundary_renum [0 0]
set_unassigned_kernel renumbering [1 2 0 3 1 1 1 1] B2L_maps[1] = [2 3| 0 2]
max_element = 4
*/
}
cudaCheckError();
//Get renumbering for halo indices
if (this->L2H_maps.size())
{
//TODO: simplify this, we don't need to check whether it has already been renumbered, there is no overlap between halos
for (int i = 0; i < num_neighbors; i++)
{
//find nodes that are part of the current boundary and they haven't been renumbered yet
thrust::fill(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, 0);
int size = this->L2H_maps[i].size();
int num_blocks = min(4096, (size + 191) / 192);
if (size > 0)
hipLaunchKernelGGL(( get_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, flagArray.raw(),
this->L2H_maps[i].raw(),
boundary_renum_flags.raw(), size, global_size /*,rank*/);
//calculate the local renumbering (within this boundary region) of these nodes
thrust_wrapper::exclusive_scan(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, boundary_renum.begin());
//apply renumbering to the big numbering table
if (size > 0)
hipLaunchKernelGGL(( set_unassigned_kernel) , dim3(num_blocks), dim3(192), 0, 0, boundary_renum_flags.raw(),
boundary_renum.raw(),
this->L2H_maps[i].raw(),
renumbering.raw(),
size, max_element, global_size /*,rank*/);
//update the number of renumbered nodes
max_element += boundary_renum[max_size - 1] + boundary_renum_flags[max_size - 1];
/*
EXAMPLE
for neighbor 0 (ID 0)
boundary_renum_flags = [0 0], size = 2, flagArray [1 1 1 1 0 0 0 0]
get_unassigned_kernel's output: boundary_renum_flags = [1 1] flagArray [1 1 1 1 1 1 0 0]
after exclusive scan: boundary_renum [0 1]
set_unassigned_kernel updates these arrays and renumbers B2L map:
renumbering = [1 2 0 3 4 5 1 1] L2H_maps[0] = [4 5]
max_element = 6
for neighbor 1 (ID 2)
get_unassigned_kernels's output: boundary_renum_flags [1 1] flagArray [1 1 1 1 1 1 1 1]
after exclusive scan boundary_renum [0 1]
set_unassigned_kernel renumbering = [1 2 0 3 4 5 6 7] L2H_maps[1] = [6 7]
max_element = 8
*/
}
cudaCheckError();
}
//apply renumbering to further halo rings too
if (rings > 1)
{
for (int i = 0; i < num_neighbors; i++)
{
int size = this->B2L_rings[i][this->B2L_rings[i].size() - 1] - this->B2L_rings[i][1];
int num_blocks = min(4096, (size + 127) / 128);
hipLaunchKernelGGL(( renumber_b2l_maps) , dim3(num_blocks), dim3(128), 0, 0, this->B2L_maps[i].raw() + this->B2L_rings[i][1], renumbering.raw(), size, global_size /*, rank*/);
}
cudaCheckError();
}
/*
EXAMPLE
renumbers further boundary rings as listed in B2L_maps, since they have not been replaced yet with their renumbered values
B2L_maps [[1 2| 0 3][2 3| 1 0]]
*/
}
/* Reorder the owned part of the matrix according to the renumbering computed
 * by createRenumbering (interior, then boundary, then halo), rebuild the CSR
 * arrays in the new order, and append identity rows for the halo columns so
 * the local matrix stays square. Finally swaps the new arrays into this->A
 * and sets the OWNED view. */
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix_owned()
{
int num_neighbors = this->neighbors.size();
int size = this->A->get_num_rows();
int num_blocks = min(4096, (size + 511) / 512);
int rings = (this->B2L_rings.size() > 0) ? this->B2L_rings[0].size() - 1 : 0;
this->set_num_halo_rings(rings);
int diag = this->A->hasProps(DIAG);
if (diag)
{
FatalError("External diag not supported in classical path", AMGX_ERR_NOT_IMPLEMENTED);
}
//
// Step 1 & 2 - create renumbering
//
this->createRenumbering(this->renumbering);
//now we have the full renumbering table in renum, calculate the inverse
this->inverse_renumbering.resize(this->renumbering.size());
if (this->renumbering.size() > 1)
{
hipLaunchKernelGGL(( calc_inverse_renumbering) , dim3(min(4096, ((int)this->renumbering.size() + 511) / 512)), dim3(512) , 0, 0, this->renumbering.raw(), this->inverse_renumbering.raw(), this->renumbering.size());
cudaCheckError();
}
//
// Step 4 - calculate number/offset of nodes in the halos from the neighbors, ring by ring
//
this->halo_offsets.resize(num_neighbors + 1);
this->halo_offsets[0] = size;
for (int i = 0; i < num_neighbors; i++)
{
this->halo_offsets[i + 1] = this->halo_offsets[i] + this->L2H_maps[i].size();
}
this->set_num_halo_rows(this->halo_offsets[this->halo_offsets.size() - 1] - size);
int nh = this->num_halo_rows();
int total_rows = size + nh;
cudaCheckError();
//
// Step 6 - renumber halo matrices and calculate row length (to eventually append to the big matrix)
//
int insert = 0;
//recalculate row_offsets
IVector new_row_offsets(size + 1);
if (num_blocks > 0)
{
hipLaunchKernelGGL(( calc_rowlen_reorder) , dim3(num_blocks), dim3(512), 0, 0, this->A->row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), size, insert);
cudaCheckError();
}
//map old column indices to the renumbered indices, in place
thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.begin()),
thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.end()),
this->A->col_indices.begin());
cudaCheckError();
//row_offsets array created by exclusive scan of row sizes
thrust_wrapper::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + size + 1, new_row_offsets.begin());
cudaCheckError();
//
// Step 7 - consolidate column indices and values
//
int new_nnz = new_row_offsets[new_row_offsets.size() - 1];
typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA;
VVector new_values((new_nnz + 1 )* this->A->get_block_size(), types::util<ValueTypeA>::get_zero());
IVector new_col_indices(new_nnz, 0);
//reorder based on row permutation
if (num_blocks > 0)
{
hipLaunchKernelGGL(( reorder_whole_matrix <32>) , dim3(num_blocks), dim3(512), 0, 0, this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), this->A->get_block_size(), size, insert);
cudaCheckError();
}
//create and append halo rows size
//create an identity matrix in CSR format
int nnz = this->A->get_num_nz();
IVector identity_csr_rows(nh + 1);
IVector identity_csr_cols(nh);
VVector identity_csr_vals(nh, types::util<ValueTypeA>::get_one()); //needs to be changed to MVector, but this definition is messed up in the header file (should fix later)
thrust::sequence(identity_csr_rows.begin(), identity_csr_rows.end());
thrust::sequence(identity_csr_cols.begin(), identity_csr_cols.end());
/*for example, 2x2 identity_csr matrix is created:
identity_csr_rows = { 0, 1, 2 }
identity_csr_cols = { 0, 1 }
identity_csr_vals = { 1.0, 1.0 } */
//shift identity matrix: row offsets start after the existing nnz entries,
//column indices start after the owned columns
thrust::transform(identity_csr_rows.begin(), identity_csr_rows.end(), thrust::constant_iterator<INDEX_TYPE>(nnz), identity_csr_rows.begin(), thrust::plus<INDEX_TYPE>());
thrust::transform(identity_csr_cols.begin(), identity_csr_cols.end(), thrust::constant_iterator<INDEX_TYPE>(size), identity_csr_cols.begin(), thrust::plus<INDEX_TYPE>());
/*for example, 2x2 identity_csr matrix is created:
identity_csr_rows = { 0, 1, 2 }
identity_csr_cols = {size, size+1 }
identity_csr_vals = { 1.0, 1.0 } */
/* WARNING: you must be very careful with the view you are setting (cuurently the view coming here by default is ALL = FULL). If
- classical path is selected then the createOneRingHaloRows -> create_one_ring_halo_rows -> append_halo_rows
routine will be called. It will overwrite the halo rows setup here (and will use view OWNED, which will ignore the
halo rows setup here, to determine how the new halo rows should be placed).
- aggregation path is selected then the extra rows setup here will be used in the R*A*P product, where (in order to match
dimensions of R and P) it is assumed that (the local partition) matrix A is square, therefore it must be padded by identity
rows at the bottom to compensate for the "extra" columns that are outside of the main square part. The old routines for the
aggregation path do this padding at the end of the reorder_matrix routine below. */
//ViewType v = this->A->currentView();
//this->A->setView(ALL);
//Approach 1: use existing routine to append the identity matrix to the existing one
// (seems like too much overhead, also need identity matrix per neighbor)
//DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
//prep->append_halo_rows(this->A, identity_csr_rows, identity_csr_cols, identity_csr_vals);
//delete prep;
//Approach 2: custom for this routine
//NOTE(review): the resizes below are in scalar elements (no block_size
//factor) - presumably block_size == 1 on this (classical) path; confirm.
new_row_offsets.resize(total_rows + 1);
new_col_indices.resize(nnz + nh);
new_values.resize(nnz + nh + 1); //extra 1 element stores zero at the end (to follow the original design)
//new_values[nnz]=-1; //marker to track the last element
thrust::copy(identity_csr_rows.begin(), identity_csr_rows.end(), new_row_offsets.begin() + size );
thrust::copy(identity_csr_cols.begin(), identity_csr_cols.end(), new_col_indices.begin() + nnz);
//move the trailing zero element to the new end, then write the identity values
thrust::copy(new_values.begin() + nnz, new_values.begin() + nnz + 1, new_values.begin() + nnz + nh);
thrust::copy(identity_csr_vals.begin(), identity_csr_vals.end(), new_values.begin() + nnz);
/* WARNING: see above. */
this->A->set_num_cols(total_rows);
this->A->set_num_rows(total_rows);
this->A->col_indices.swap(new_col_indices);
new_row_offsets.resize(total_rows + 1);
this->A->row_offsets.swap(new_row_offsets);
//after the swap, 'new_row_offsets' holds the OLD offsets; stash them
new_row_offsets.swap(this->old_row_offsets);
this->A->values.swap(new_values);
this->A->m_seq_offsets.resize(total_rows + 1);
thrust::sequence(this->A->m_seq_offsets.begin(), this->A->m_seq_offsets.end());
cudaCheckError();
//TODO: only do this if AMG_Config matrix_halo_exchange!=2
this->A->delProps(COO);
if (!insert)
{
this->A->computeDiagonal();
}
this->set_initialized(this->A->row_offsets);
this->A->setView(OWNED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix()
{
int num_neighbors = this->neighbors.size();
if (num_neighbors == 0) { return; }
int size = this->A->get_num_rows();
int num_blocks = min(4096, (size + 511) / 512);
int rings = this->B2L_rings[0].size() - 1;
this->set_num_halo_rings(rings);
int diag = this->A->hasProps(DIAG);
std::vector<Matrix<TConfig_d> > &halo_rows = *this->halo_rows;
std::vector<DistributedManager<TConfig_d> > &halo_btl = *this->halo_btl;
/*
EXAMPLE
The example matrix, on partition 1 arrives at this point with the following state:
num_rings=2
A.num_rows = 4; A.num_nz = 20
A.row_offsets = [0 4 11 15 20]
A.col_indices = [4 0 1 2
4 5 0 1 2 3 7
0 1 2 3
1 2 3 6 7]
num_neighbors=2; neighbors = [0 2]
B2L_rings[[0 2 4][0 2 4]] B2L_maps[[0 1| 2 3][1 3| 0 2]]
L2H_maps (and halo_lists) [[4 5][6 7]]
With the exchange halo rows:
halo_btl[0] (received from neighbor ID 0)
global_id = 0; base_index=0; index_range=6; B2L_rings[0] = [0 2 4]; B2L_maps[0] = [2 3| 0 1] L2H_maps = [4 5]
halo_rows[0].row_offsets = [0 5 13 17 21]
halo_rows[0].col_indices = [1 2 3 4 5
0 1 2 3 4 5 6 7
0 1 3 6
0 1 2 3]
halo_btl[1] (received from neighbor ID 2)
global_id = 2; base_index=0; index_range=8; B2L_rings[0] = [0 2 4]; B2L_maps[0] = [1 2| 0 3] L2H_maps = [6 7]
halo_rows[1].row_offsets = [0 4 11 16 20]
halo_rows[1].col_indices = [7 1 2 3
5 6 7 0 1 2 3
4 5 0 2 3
0 1 2 3]
*/
//
// Step 1 & 2 - create renumbering
//
this->createRenumbering(this->renumbering);
cudaCheckError();
/*
EXAMPLE
this->renumbering = [1 2 0 3 4 5 6 7]
B2L_maps = [[1 2| 0 3][2 3| 1 0]]
L2H_maps = [[4 5][6 7]]
*/
//
// Step 3 - given a full renumbering of owned nodes, calculate inverse renumbering
//
//now we have the full renumbering table in renum, calculate the inverse
this->inverse_renumbering.resize(this->renumbering.size());
hipLaunchKernelGGL(( calc_inverse_renumbering) , dim3(min(4096, ((int)this->renumbering.size() + 511) / 512)), dim3(512) , 0, 0, this->renumbering.raw(), this->inverse_renumbering.raw(), this->renumbering.size());
cudaCheckError();
/*
EXAMPLE
this->inverse_renumbering = [2 0 1 3 4 5 6 7]
*/
//
// Step 4 - calculate number/offset of nodes in the halos from the neighbors, ring by ring
//
this->halo_offsets.resize(rings * num_neighbors + 1, 0);
for (int ring = 0; ring < rings; ring++)
{
for (int i = 0; i < num_neighbors; i++)
{
this->halo_offsets[ring * num_neighbors + i] = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
}
}
thrust::exclusive_scan(this->halo_offsets.begin(), this->halo_offsets.end(), this->halo_offsets.begin(), size);
cudaCheckError();
this->set_num_halo_rows(this->halo_offsets[this->halo_offsets.size() - 1] - size);
int total_rows = size + this->num_halo_rows();
if (total_rows < this->renumbering.size()) { FatalError("total rows < renumbering.size(), send/recv maps should cover all matrix halo columns", AMGX_ERR_NOT_IMPLEMENTED); }
if (total_rows > this->renumbering.size())
{
this->A->getResources()->warning("# owned nodes + # halo nodes > matrix columns: send/recv maps have some unreferences halo indices, they are not directly connected to our partition and therefore we won't compute them, please use 2-ring comms maps if you want to specify 2nd ring neighbors");
}
cudaCheckError();
/*
EXAMPLE
halo_offsets [2 2 2 2]
after exclusive scan: 4 + [0 2 4 6 8] = [4 6 8 10 12]
num_halo_rows = 8, total_rows = 12
*/
//
// Step 5 - create big mapping table of all halo indices we received (this may use a little too much memory)
//
//count number of fine rows of neighbors
thrust::host_vector<INDEX_TYPE> neighbor_rows(num_neighbors + 1);
int max_num_rows = 0;
for (int i = 0; i < num_neighbors; i++)
{
neighbor_rows[i] = halo_btl[i].index_range();
max_num_rows = max_num_rows > halo_rows[i].get_num_rows() ? max_num_rows : halo_rows[i].get_num_rows();
}
thrust::exclusive_scan(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows.begin());
cudaCheckError();
int total_rows_of_neighbors = neighbor_rows[num_neighbors];
/*
EXAMPLE
neigbor_rows = [0 6 14]
total_rows_of_neighbors = 14
*/
IVector halo_mapping(total_rows_of_neighbors);
thrust::fill(halo_mapping.begin(), halo_mapping.end(), -1);
cudaCheckError();
//ring by ring, neighbor by neighbor assign sequentially increasing numbers for halo nodes
for (int ring = 0; ring < rings; ring++)
{
for (int i = 0; i < num_neighbors; i++)
{
int size = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
int num_blocks = min(4096, (size + 127) / 128);
//This renumbering has to result in the same renumbering that comes out of L2H renumbering
hipLaunchKernelGGL(( create_halo_mapping) , dim3(num_blocks), dim3(128), 0, 0, halo_mapping.raw() + neighbor_rows[i],
halo_btl[i].B2L_maps[0].raw() + halo_btl[i].B2L_rings[0][ring],
halo_btl[i].base_index(), this->halo_offsets[ring * num_neighbors + i], size);
cudaCheckError();
/*
EXAMPLE
ring 0 neighbor 0 - halo_btl[0].B2L_maps[0] = [2 3| 0 1] halo_btl[0].L2H_maps = [4 5]
halo_mapping = [-1 -1 4 5 -1 -1 |-1 -1 -1 -1 -1 -1 -1 -1]
ring 0 neighbor 1 - halo_btl[1].B2L_maps[0] = [1 2| 0 3] halo_btl[1].L2H_maps = [6 7]
halo_mapping = [-1 -1 4 5 -1 -1 |-1 6 7 -1 -1 -1 -1 -1]
ring 1 neighbor 0 - halo_btl[0].B2L_maps[0] = [2 3| 0 1] halo_btl[0].L2H_maps = [4 5]
halo_mapping = [8 9 4 5 -1 -1 |-1 6 7 -1 -1 -1 -1 -1]
ring 1 neighbor 1 - halo_btl[1].B2L_maps[0] = [1 2| 0 3] halo_btl[1].L2H_maps = [6 7]
halo_mapping = [8 9 4 5 -1 -1 |10 6 7 11 -1 -1 -1 -1]
*/
}
}
cudaCheckError();
for (int i = 0; i < num_neighbors; i++)
{
int size = halo_btl[i].L2H_maps[0].size();
int num_blocks = min(4096, (size + 127) / 128);
//Map the column indices of the halo rows that point back to boundary nodes
hipLaunchKernelGGL(( apply_h2l2b_mapping) , dim3(num_blocks), dim3(128), 0, 0, halo_mapping.raw() + neighbor_rows[i],
halo_btl[i].L2H_maps[0].raw(),
halo_btl[i].base_index(), this->B2L_maps[i].raw(), size);
cudaCheckError();
/*
EXAMPLE
neighbor 0 - mapping back to our own (boundary) indices
halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 -1 -1]
neighbor 1 - mapping back to our own (boundary) indices
halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3]
*/
}
cudaCheckError();
/*
EXAMPLE
neighbor_rows = [0 6 14]
halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3]
The first part (0-6) of halo_mapping gives a local index for all the indices that we want to know about in halo_btl[0]
The second part (7-14) gives local indices for halo_btl[1], that is both halo ring there, and the column indices representing vertices in this partition's boundary.
Note that it does not give indices (-1) for vertices 5 and 6 in neighbor 1 (ID 2), which are column indices connecting it to neighbor 0, hence the two halo regions are not connected
*/
//
// Step 6 - renumber halo matrices and calculate row length (to eventually append to the big matrix)
//
int insert = 0;
if (this->A->hasProps(DIAG) && insertDiagonals) { insert = 1; }
diag = diag && !insertDiagonals;
//recalculate row_offsets
IVector new_row_offsets(size + this->num_halo_rows() + 1);
hipLaunchKernelGGL(( calc_rowlen_reorder) , dim3(num_blocks), dim3(512), 0, 0, this->A->row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), size, insert);
cudaCheckError();
IVector neighbor_rows_d(num_neighbors + 1);
thrust::copy(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows_d.begin());
cudaCheckError();
/*
EXAMPLE
get row length according to renumbering
new_row_offsets = [4 4 7 5 0 0 0 0 0 0 0 0 0]
*/
//map column indices of my own matrix
/*map_col_indices<4><<<num_blocks, 512>>>(this->A->row_offsets.raw(),
this->A->col_indices.raw(),
this->renumbering.raw(),
this->halo_ranges.raw(),
halo_mapping.raw(),
neighbor_rows_d.raw(),
this->base_index(), num_neighbors, size);*/
thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.begin()),
thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.end()),
this->A->col_indices.begin());
cudaCheckError();
/*
EXAMPLE
use this->renumbering = [1 2 0 3 4 5 6 7]
to map old column indices to new column indices (i.e. according to interior - boundary - halo separation), but do not reshuffle them into their place yet
A.col_indices = [4 0 1 2
4 5 0 1 2 3 7
0 1 2 3
1 2 3 6 7]
becomes
A.col_indices = [4 1 2 0
4 5 1 2 0 3 7
1 2 0 3
2 0 3 6 7]
*/
cudaCheckError();
IVector temp_row_len(max_num_rows);
for (int i = 0; i < num_neighbors; i++)
{
//map column indices of halo matrices and count of nonzeros we will keep
int size = halo_rows[i].get_num_rows();
int num_blocks = min(4096, (size + 127) / 128);
hipLaunchKernelGGL(( map_col_indices_and_count_rowlen<4>) , dim3(num_blocks), dim3(128), 128 * sizeof(INDEX_TYPE), 0,
halo_rows[i].row_offsets.raw(),
halo_rows[i].col_indices.raw(),
temp_row_len.raw(),
halo_mapping.raw() + neighbor_rows[i],
size, insert);
cudaCheckError();
//number of nonzeros per row copied into big row sizes array
for (int ring = 0; ring < rings; ring++)
{
thrust::copy(temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring], temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring + 1], new_row_offsets.begin() + this->halo_offsets[ring * num_neighbors + i]);
}
cudaCheckError();
/*
EXAMPLE
halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3]
look at halo row matrices, and halo_mapping, count column indices that do not map to -1 and map them to their new, local index
halo_rows[0].col_indices = [1 2 3 4 5
0 1 2 3 4 5 6 7
0 1 3 6
0 1 2 3]
becomes
halo_rows[0].col_indices = [9 4 5 1 2
8 9 4 5 1 2 -1 -1
8 9 5 -1
8 9 4 5]
with temp_row_len = [5 6 3 4] copied into new_row_offsets: [4 4 7 5| 5 6| 0 0| 3 4| 0 0 0]
halo_rows[1].col_indices = [7 1 2 3
5 6 7 0 1 2 3
4 5 0 2 3
0 1 2 3]
becomes
halo_rows[1].col_indices = [3 6 7 11
-1 2 3 10 6 7 11
-1 -1 10 7 11
10 6 7 11]
with temp_row_len = [4 6 3 4] copied into new_row_offsets: [4 4 7 5| 5 6| 4 6| 3 4| 3 4 0]
*/
}
cudaCheckError();
//row_offsets array created by exclusive scan of row sizes
thrust_wrapper::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + size + this->num_halo_rows() + 1, new_row_offsets.begin());
cudaCheckError();
/*
EXAMPLE
Exclusive scan to get new_row_offsets array:
new_row_offsets = [0 4 8 15 20| 25 31| 35 41| 44 48| 51 55]
*/
//
// Step 7 - consolidate column indices and values
//
int new_nnz = new_row_offsets[new_row_offsets.size() - 1];
typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA;
VVector new_values((new_nnz + 1 + diag * (total_rows - 1))* this->A->get_block_size(), types::util<ValueTypeA>::get_zero());
IVector new_col_indices(new_nnz, 0);
//reorder based on row permutation
hipLaunchKernelGGL(( reorder_whole_matrix <32>) , dim3(num_blocks), dim3(512), 0, 0, this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), this->A->get_block_size(), size, insert);
cudaCheckError();
if (diag)
{
//reorder based on row permutation
hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, new_values.raw() + new_row_offsets[total_rows]*this->A->get_block_size(),
this->A->values.raw() + this->A->row_offsets[size]*this->A->get_block_size(),
this->renumbering.raw(),
this->A->get_block_size(), size);
cudaCheckError();
}
int cumulative_num_rows = size;
for (int i = 0; i < num_neighbors; i++)
{
for (int ring = 0; ring < rings; ring++)
{
int num_rows = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
int num_blocks = min(4096, (num_rows + 127) / 128);
//copy in nonzeros that we are keeping
//TODO: access pattern - should be implemented with warp-wide scans to decide which nonzeros we are keeping and where the rest is going
hipLaunchKernelGGL(( reorder_whole_halo_matrix) , dim3(num_blocks), dim3(128), 0, 0, halo_rows[i].row_offsets.raw() + halo_btl[i].B2L_rings[0][ring],
halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(),
new_row_offsets.raw() + this->halo_offsets[ring * num_neighbors + i],
new_col_indices.raw(), new_values.raw(), NULL, this->A->get_block_size(), num_rows,
insert, this->halo_offsets[ring * num_neighbors + i], halo_btl[i].B2L_rings[0][ring], halo_btl[i].B2L_rings[0][rings]);
if (diag)
{
thrust::copy(halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring])*this->A->get_block_size(),
halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring + 1])*this->A->get_block_size(),
new_values.begin() + (new_row_offsets[total_rows] + cumulative_num_rows)*this->A->get_block_size());
cumulative_num_rows += num_rows;
}
}
}
cudaCheckError();
/*
EXAMPLE
copy everything in place, dropping -1 column indices in the halo and reordering the owned rows
new_row_offsets = [0 4 8 15 20| 25 31| 35 41| 44 48| 51 55]
new_col_indices = [1 2 0 3
4 1 2 0
4 5 1 2 0 3 7
2 0 3 6 7 -end of owned
9 4 5 1 2
8 9 4 5 1 2 - end of neighbor 0 ring 0
3 6 7 11
2 3 10 6 7 11 - end of neighbor 1 ring 0
8 9 5
8 9 4 5 - end of neighbor 0 ring 1
10 7 11
10 6 7 11] - end of neighbor 1 ring 1
*/
this->A->set_num_cols(total_rows);
this->A->set_num_rows(size);
this->A->col_indices.swap(new_col_indices);
new_row_offsets.resize(total_rows + 1);
this->A->row_offsets.swap(new_row_offsets);
new_row_offsets.swap(this->old_row_offsets);
this->A->values.swap(new_values);
this->A->m_seq_offsets.resize(total_rows + 1);
thrust::sequence(this->A->m_seq_offsets.begin(), this->A->m_seq_offsets.end());
if (insert)
{
this->A->delProps(DIAG);
this->A->diag.resize(total_rows);
thrust::copy(this->A->row_offsets.begin(), this->A->row_offsets.end() - 1, this->A->diag.begin());
}
cudaCheckError();
delete this->halo_rows;
delete this->halo_btl;
//set halo_rows and halo_btl to NULL to avoid a potential double free situation in the future
this->halo_rows = NULL;
this->halo_btl = NULL;
this->A->delProps(COO);
this->A->set_initialized(1);
//TODO: only do this if AMG_Config matrix_halo_exchange!=2
if (!insert) { this->A->computeDiagonal(); }
this->A->setView(OWNED);
}
// Unary functor for thrust calls: returns its argument shifted by a fixed
// constant (used e.g. to turn local row indices into global ones by adding
// the partition's starting offset).
template<typename T>
class add_constant_op
{
        const T shift; // constant added to every input value
    public:
        add_constant_op(T s) : shift(s) {}
        __host__ __device__ T operator()(const T &v) const
        {
            return v + shift;
        }
};
// Builds in q the permutation that (a) shifts the local (diagonal-block) part of
// the inverse reordering p by this partition's global row offset and (b) fills the
// halo tail of q with the neighbors' shifted values obtained via a halo exchange.
// n   - number of owned rows (the first n entries of q are overwritten last).
// l2g - local-to-global map; NOTE(review): not referenced directly here — the
//       mapping appears to be applied implicitly by exchange_halo (see comment
//       below) — confirm against exchange_halo's implementation.
// p   - inverse renumbering (input, size >= n plus halo).
// q   - output permutation, resized to p.size().
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::obtain_shift_l2g_reordering(index_type n, I64Vector_d &l2g, IVector_d &p, IVector_d &q)
{
/* WARNING: Exchange halo of the inverse_reordering, which is implicitly based on the local_to_global_map (l2g).
Notice that it is implicit in the exchange_halo routine, since you are getting exactly the vector
halo elements, which are exactly the elements you need. They however must be shifted by the partition
starting points (starting global row indices, which are contained in array part_offsets).
This allows us to avoid constructing the global vector for inverse permutation,
as is done in reference MATLAB code. */
//Recall that part_offsets provide the starting point (global row index) of every partition, in other words,
//they contain the prefix sum of number of rows assigned to each partition. Also, notice that part_offsets and
//part_offsets_h have the same values on device and host, respectively. See below few lines for details:
index_type tag = 1 * 133 + 3 * 7 + 0; //some random number for the tag
index_type l = p.size();
q.resize(l);
// Copy p and shift every entry into global index space for this partition.
thrust::copy (p.begin(), p.end(), q.begin());
thrust::transform(q.begin(), q.end(), q.begin(), add_constant_op<index_type>(this->part_offsets[this->global_id()]));
// Exchange halo so q's tail holds the neighbors' shifted inverse-reordering values.
this->exchange_halo(q, tag);
// The owned prefix [0, n) is then simply the identity shifted by our global offset
// (the shifted copy of p above only mattered for what was sent to neighbors).
thrust::sequence (q.begin(), q.begin() + n);
thrust::transform(q.begin(), q.begin() + n, q.begin(), add_constant_op<index_type>(this->part_offsets[this->global_id()]));
cudaCheckError();
}
// Undoes the distributed renumbering of the owned matrix block and writes the
// result into caller-provided CSR arrays (Bp, Bc, Bv):
//   (i)  permutes rows and columns of A back to the original (mixed
//        interior/boundary) ordering, out-of-place into B;
//   (ii) converts B's column indices in-place from local to global numbering
//        (diagonal block shifted by the partition offset, off-diagonal block
//        mapped through the local-to-global map).
// Preconditions: Bp, Bc, Bv are device buffers sized for the OWNED view's
// rows/nnz — NOTE(review): not checked here, caller must guarantee this.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::unpack_partition(index_type *Bp, index_type *Bc, mat_value_type *Bv)
{
index_type l, n, nnz, offset;
index_type *ir;
index_type *Ap;
index_type *Ac;
mat_value_type *Av;
IVector q;
//some initializations
this->A->getOffsetAndSizeForView(OWNED, &offset, &n);
this->A->getNnzForView(OWNED, &nnz);
l = this->inverse_renumbering.size();
ir = this->inverse_renumbering.raw();
Ap = this->A->row_offsets.raw();
Ac = this->A->col_indices.raw();
Av = this->A->values.raw();
//(i) reorder the matrix back (into mixed interior-boundary nodes)
//applies to rows and columns (out-of-place)
reorder_partition<index_type, mat_value_type, true, true>
(n, nnz, Ap, Ac, Av, Bp, Bc, Bv, l, ir);
cudaCheckError();
//obtain reordering q that combines the shift of the diagonal block with the off-diagonal block indices conversion from local to global
this->obtain_shift_l2g_reordering(n, this->local_to_global_map, this->inverse_renumbering, q);
cudaCheckError();
//(ii) reorder the matrix back (shift the diagonal block and convert off-diagonal block column indices from local to global)
//applies columns only (in-place)
reorder_partition<index_type, mat_value_type, false, true>
(n, nnz, Bp, Bc, Bv, Bp, Bc, Bv, q.size(), q.raw());
cudaCheckError();
}
// For every fine-level neighbor, record the partition it is being consolidated
// into: neigh_to_part[i] = destination_part[neighbors[i]].
// neigh_to_part is resized to num_neighbors; the other arguments are read-only.
template <class TConfig>
void DistributedManagerBase<TConfig>::createNeighToDestPartMap(IVector_h &neigh_to_part, IVector_h &neighbors, IVector_h &destination_part, int num_neighbors)
{
    neigh_to_part.resize(num_neighbors);
    for (int n = 0; n < num_neighbors; ++n)
    {
        const int fine_neighbor = neighbors[n];
        neigh_to_part[n] = destination_part[fine_neighbor];
    }
}
// Reduce the per-fine-neighbor destination list to the sorted set of distinct
// destination partitions, excluding our own destination (partitions merging
// into us are no longer external neighbors of the consolidated partition).
// Inputs:  neigh_to_part (destination partition per fine neighbor),
//          my_destination_part (partition this rank merges into).
// Outputs: cons_neigh_to_part (sorted, unique, own-destination removed),
//          num_cons_neighbors (its size).
template <class TConfig>
void DistributedManagerBase<TConfig>::createConsolidatedNeighToPartMap(IVector_h &cons_neigh_to_part, IVector_h &neigh_to_part, int my_destination_part, IVector_h &destination_part, int &num_cons_neighbors)
{
    cons_neigh_to_part = neigh_to_part;
    thrust::sort(cons_neigh_to_part.begin(), cons_neigh_to_part.end());
    cudaCheckError();
    // Erase-remove: drop duplicate destinations (sorted, so unique suffices).
    auto tail = thrust::unique(cons_neigh_to_part.begin(), cons_neigh_to_part.end());
    cons_neigh_to_part.erase(tail, cons_neigh_to_part.end());
    // Erase-remove: drop entries equal to our own destination partition.
    tail = thrust::remove_if(cons_neigh_to_part.begin(), cons_neigh_to_part.end(), is_my_part(my_destination_part));
    cons_neigh_to_part.erase(tail, cons_neigh_to_part.end());
    num_cons_neighbors = cons_neigh_to_part.size();
    cudaCheckError();
}
// Map each fine neighbor to its index in the (sorted) consolidated-neighbor
// list via vectorized binary search; fine neighbors that merge into our own
// destination partition are flagged with -1 afterwards.
template <class TConfig>
void DistributedManagerBase<TConfig>::createNeighToConsNeigh(IVector_h &neigh_to_cons_neigh, IVector_h &cons_neigh_to_part, IVector_h &neigh_to_part, int my_destination_part, int &num_neighbors)
{
    neigh_to_cons_neigh.resize(num_neighbors);
    // lower_bound into the sorted cons_neigh_to_part gives, for every entry of
    // neigh_to_part, the position of its consolidated neighbor.
    thrust::lower_bound(cons_neigh_to_part.begin(), cons_neigh_to_part.end(), neigh_to_part.begin(), neigh_to_part.end(), neigh_to_cons_neigh.begin());
    cudaCheckError();
    // Neighbors going to our own partition were not found above — mark them.
    for (int idx = 0; idx < num_neighbors; ++idx)
    {
        if (neigh_to_part[idx] == my_destination_part) { neigh_to_cons_neigh[idx] = -1; }
    }
}
// Merge the per-fine-neighbor boundary-to-local (B2L) maps into one map per
// coarse (consolidated) destination partition, then sort each merged map and
// remove duplicate boundary indices (aggregates shared by several fine
// neighbors that end up in the same merged partition).
// Inputs:  coarse_B2L_maps (one map per fine neighbor),
//          fine_neigh_to_coarse_neigh (fine neighbor -> coarse neighbor index,
//          -1 for neighbors merging into our own partition).
// Output:  dest_coarse_B2L_maps (one sorted, de-duplicated map per coarse neighbor).
// Note: the previously-computed-but-unused locals `my_id` and `max_size`
// (dead code) have been removed; behavior is otherwise unchanged.
template <class TConfig>
template <class IVector_hd>
void DistributedManagerBase<TConfig>::consB2Lmaps(std::vector<IVector_hd> &dest_coarse_B2L_maps, std::vector<IVector_hd> &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors)
{
    //Merge B2L fine maps per coarse destination
    dest_coarse_B2L_maps.resize(num_coarse_neighbors);
    std::vector<int> dest_coarse_B2L_maps_scratch_sizes(num_coarse_neighbors, 0);
    // Pass 1: total number of fine boundary indices per coarse destination.
    for (int i = 0; i < num_fine_neighbors; i++)
    {
        int k = fine_neigh_to_coarse_neigh[i];
        if (k != -1)
        {
            dest_coarse_B2L_maps_scratch_sizes[k] += coarse_B2L_maps[i].size();
        }
    }
    // Allocate merged maps; reuse the size array as a running write offset.
    for (int k = 0; k < num_coarse_neighbors; k++)
    {
        dest_coarse_B2L_maps[k].resize( dest_coarse_B2L_maps_scratch_sizes[k] );
        // Reset sizes to 0 (used as offset in next loop);
        dest_coarse_B2L_maps_scratch_sizes[k] = 0;
    }
    // Pass 2: concatenate each fine map into its destination's merged map.
    for (int i = 0; i < num_fine_neighbors; i++)
    {
        int k = fine_neigh_to_coarse_neigh[i];
        if (k != -1)
        {
            int offset = dest_coarse_B2L_maps_scratch_sizes[k];
            thrust::copy(coarse_B2L_maps[i].begin(), coarse_B2L_maps[i].end(), dest_coarse_B2L_maps[k].begin() + offset);
            dest_coarse_B2L_maps_scratch_sizes[k] += coarse_B2L_maps[i].size();
        }
    }
    cudaCheckError();
    // Remove duplicates (aggregates in boundary that go to same merged partition)
    for (int i = 0; i < num_coarse_neighbors; i++)
    {
        int size = dest_coarse_B2L_maps[i].size();
        thrust::sort(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].begin() + size);
        index_type num_unique = thrust::unique(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].begin() + size) - dest_coarse_B2L_maps[i].begin();
        dest_coarse_B2L_maps[i].erase(dest_coarse_B2L_maps[i].begin() + num_unique, dest_coarse_B2L_maps[i].end());
    }
    cudaCheckError();
}
// Computes, for a group of partitions being consolidated onto one root, the
// row offsets each child partition's interior/boundary rows will occupy in the
// merged matrix. Non-root partitions send their (interior, boundary) row
// counts to the root (tag 777) and receive back their offsets plus the merged
// totals (tag 778); the root gathers all counts, forms cumulative sums, and
// replies to every child. On return:
//   interior_offset / boundary_offset     - where this partition's rows start,
//   total_*_rows_in_merged                - merged interior/boundary totals,
//   vertex_counts (root only)             - per-child (interior, boundary) counts.
// NOTE(review): boundary_offset is relative to this partition's first row
// (my_offsets[1] + total_interior - num_interior_rows) — confirm against callers.
template <class TConfig>
void DistributedManagerBase<TConfig>::computeConsolidatedOffsets(const int my_id, const int my_destination_part, const bool is_root_partition, const int num_interior_rows, const int num_boundary_rows, IVector_h_vector &vertex_counts, const IVector_h &parts_to_consolidate, const int num_parts_to_consolidate, int &interior_offset, int &boundary_offset, int &total_interior_rows_in_merged, int &total_boundary_rows_in_merged, int &total_rows_in_merged, DistributedComms<TConfig> *comms)
{
// my_offsets layout: [interior offset, boundary offset, total interior, total boundary]
IVector_h my_offsets(4);
IVector_h my_sizes(2);
my_sizes[0] = num_interior_rows;
my_sizes[1] = num_boundary_rows;
if (!is_root_partition)
{
//Send number of interior and boundary nodes to root
comms->send_vector_async(my_sizes, my_destination_part, 777);
comms->recv_vector(my_offsets, my_destination_part, 778);
comms->send_vector_wait_all(my_sizes);
}
else
{
vertex_counts.resize(num_parts_to_consolidate);
IVector_h child_sizes(2);
IVector_h offsets_interior(num_parts_to_consolidate);
IVector_h offsets_boundary(num_parts_to_consolidate);
int count_int = 0;
int count_bdy = 0;
// Gather counts from every child (or use our own), accumulating prefix sums.
for (int i = 0; i < num_parts_to_consolidate; i++)
{
if (parts_to_consolidate[i] == my_id)
{
child_sizes = my_sizes;
}
else
{
comms->recv_vector(child_sizes, parts_to_consolidate[i], 777);
}
//Do a simple cumulative sum to determine total number of interior/boundary rows and their offsets on a per contributing partition basis
offsets_interior[i] = count_int;
offsets_boundary[i] = count_bdy;
count_int += child_sizes[0];
count_bdy += child_sizes[1];
//Save them
vertex_counts[i].resize(2);
vertex_counts[i][0] = child_sizes[0];
vertex_counts[i][1] = child_sizes[1];
}
// Reply to every child with its offsets and the merged totals.
for (int i = 0; i < num_parts_to_consolidate; i++)
{
//Send back to contributing partitions
IVector_h offsets_to_send(4);
offsets_to_send[0] = offsets_interior[i];
offsets_to_send[1] = offsets_boundary[i];
offsets_to_send[2] = count_int;
offsets_to_send[3] = count_bdy;
if (parts_to_consolidate[i] == my_id)
{
my_offsets = offsets_to_send;
}
else
{
comms->send_vector(offsets_to_send, parts_to_consolidate[i], 778); // cannot make async, rewriting internal buffer
}
}
}
// Unpack the reply (identical code path for root and children).
interior_offset = my_offsets[0];
boundary_offset = my_offsets[1] + my_offsets[2] - num_interior_rows;
total_interior_rows_in_merged = my_offsets[2];
total_boundary_rows_in_merged = my_offsets[3];
total_rows_in_merged = my_offsets[2] + my_offsets[3];
}
// Consolidates the boundary-to-local (B2L) maps of a group of merging
// partitions on the root of that group.
// Non-root ranks send their coarse-neighbor count (tag 1111), neighbor list
// (2222), per-neighbor boundary sizes (3333) and each B2L map (4444+i) to the
// root. The root merges the neighbor lists into a sorted, de-duplicated
// consolidated neighbor list and concatenates the children's B2L maps per
// consolidated neighbor (sorting/dedup happens outside this function).
// Outputs (root only): num_consolidated_neighbors, consolidated_B2L_maps,
// consolidated_coarse_ids.
// Fix: num_coarse_ids_from_part was constructed by copying the vector
// fine_parts_to_consolidate instead of being sized with the integer
// num_fine_parts_to_consolidate (cf. the two sibling vectors right below it);
// it only worked because the copied vector happened to have that size and
// every element is overwritten before being read.
template <class TConfig>
template <class IVector_hd>
void DistributedManagerBase<TConfig>::consB2LmapsOnRoot(int &num_consolidated_neighbors, std::vector<IVector_hd> &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, std::vector<IVector_hd> &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms)
{
    // TODO: it is possible to optimize exchanges, for example fuse recv_vector in recreating coarse neigbours
    if (my_destination_part != my_id)
    {
        //if not root, send coarse neighbor list using fine indices and the corresponding boundary lists
        IVector_h num_coarse_neigh(1);
        num_coarse_neigh[0] = num_coarse_neighbors;
        comms->send_vector_async(num_coarse_neigh, my_destination_part, 1111);
        comms->send_vector_async(coarse_neigh_to_fine_part, my_destination_part, 2222);
        comms->send_vector_async(num_bdy_per_coarse_neigh, my_destination_part, 3333);
        for (int i = 0; i < num_coarse_neighbors; i++)
        {
            comms->send_vector_async(dest_coarse_B2L_maps[i], my_destination_part, 4444 + i) ;
        }
        // Wait on every outstanding send before the buffers can go out of scope.
        comms->send_vector_wait_all(num_coarse_neigh);
        comms->send_vector_wait_all(coarse_neigh_to_fine_part);
        comms->send_vector_wait_all(num_bdy_per_coarse_neigh);
        for (int i = 0; i < num_coarse_neighbors; i++)
        {
            comms->send_vector_wait_all(dest_coarse_B2L_maps[i]) ;
        }
    }
    if (is_root_partition)
    {
        // Fixed: size with the count, not copy-construct from the id vector.
        IVector_h num_coarse_ids_from_part(num_fine_parts_to_consolidate);
        IVector_h_vector coarse_ids_from_part(num_fine_parts_to_consolidate);
        IVector_h_vector num_coarse_neigh_bdys_from_part(num_fine_parts_to_consolidate);
        //If root, receive sizes, and resize receive buffers
        int total_num_coarse_ids = 0;
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            int current_part = fine_parts_to_consolidate[i];
            IVector_h temp(1);
            if (current_part != my_id)
            {
                comms->recv_vector(temp, current_part, 1111);
            }
            else
            {
                temp[0] = num_coarse_neighbors;
            }
            num_coarse_ids_from_part[i] = temp[0];
            coarse_ids_from_part[i].resize(temp[0]);
            num_coarse_neigh_bdys_from_part[i].resize(temp[0]);
            total_num_coarse_ids += temp[0];
        }
        //Create a neighbor list for the consolidated coarse matrix, by merging coarse neighbor lists from partitions that are being merged
        consolidated_coarse_ids.resize(total_num_coarse_ids);
        int count = 0;
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            int current_part = fine_parts_to_consolidate[i];
            // Get from each partition the coarse partition ids in their B2L maps
            if (current_part != my_id)
            {
                comms->recv_vector(coarse_ids_from_part[i], current_part, 2222);
                comms->recv_vector(num_coarse_neigh_bdys_from_part[i], current_part, 3333);
            }
            else
            {
                coarse_ids_from_part[i] = coarse_neigh_to_fine_part;
                num_coarse_neigh_bdys_from_part[i] = num_bdy_per_coarse_neigh;
            }
            thrust::copy(coarse_ids_from_part[i].begin(), coarse_ids_from_part[i].end(), consolidated_coarse_ids.begin() + count);
            count += num_coarse_ids_from_part[i];
        }
        cudaCheckError();
        //eliminate duplicates
        thrust::sort(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end());
        cudaCheckError();
        consolidated_coarse_ids.erase(thrust::unique(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end()), consolidated_coarse_ids.end());
        cudaCheckError();
        num_consolidated_neighbors = consolidated_coarse_ids.size();
        // For every child partition, map each of its coarse ids to an index in
        // the sorted consolidated neighbor list (vectorized binary search).
        IVector_h_vector coarse_ids_from_part_to_consolidated_neighbor(num_fine_parts_to_consolidate);
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            coarse_ids_from_part_to_consolidated_neighbor[i].resize(num_coarse_ids_from_part[i]);
            thrust::lower_bound(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end(), coarse_ids_from_part[i].begin(), coarse_ids_from_part[i].end(), coarse_ids_from_part_to_consolidated_neighbor[i].begin());
        }
        cudaCheckError();
        // Map each coarse partition to new coarse ID
        consolidated_B2L_maps.resize(num_consolidated_neighbors);
        IVector_h consolidated_B2L_maps_sizes(num_consolidated_neighbors);
        // Offset in the consolidated_B2L_maps
        IVector_h_vector coarse_ids_offsets(num_fine_parts_to_consolidate);
        for (int i = 0; i < num_consolidated_neighbors; i++)
        {
            consolidated_B2L_maps_sizes[i] = 0;
        }
        // Compute the size of each consolidated_B2L_maps and offsets into it, where we will receive the parts coming from partitions that are getting merged into this one
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            coarse_ids_offsets[i].resize(num_coarse_ids_from_part[i]);
            for (int j = 0; j < num_coarse_ids_from_part[i]; j++)
            {
                int coarse_id = coarse_ids_from_part[i][j];
                int k = num_coarse_neigh_bdys_from_part[i][j];
                coarse_ids_offsets[i][j] = consolidated_B2L_maps_sizes[ coarse_ids_from_part_to_consolidated_neighbor[i][j] ];
                consolidated_B2L_maps_sizes[ coarse_ids_from_part_to_consolidated_neighbor[i][j] ] += k;
            }
        }
        for (int i = 0; i < num_consolidated_neighbors; i++)
        {
            consolidated_B2L_maps[i].resize(consolidated_B2L_maps_sizes[i]);
        }
        // Receive the B2L maps from each child partition, concatenate them (gets sorted outside)
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            int current_part = fine_parts_to_consolidate[i];
            for (int j = 0; j < num_coarse_ids_from_part[i]; j++)
            {
                int my_coarse_neigh = coarse_ids_from_part_to_consolidated_neighbor[i][j];
                int offset = coarse_ids_offsets[i][j];
                if (current_part != my_id)
                {
                    comms->recv_vector( consolidated_B2L_maps[my_coarse_neigh], current_part, 4444 + j, offset, num_coarse_neigh_bdys_from_part[i][j]); //Need to do proper tagging here, otherwise messages from the same source would get mixed up
                }
                else
                {
                    thrust::copy(dest_coarse_B2L_maps[j].begin(), dest_coarse_B2L_maps[j].end(), consolidated_B2L_maps[my_coarse_neigh].begin() + offset);
                }
            }
        }
        cudaCheckError();
    }
}
// Host-vector overload: thin forwarder to the shared consAndRenumberHalos
// implementation (templated on the aggregates vector type).
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateAndRenumberHalos(IVector_h &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_ids, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms)
{
    consAndRenumberHalos(aggregates, manager_halo_offsets, halo_offsets, neighbors,
                         num_fine_neighbors, consolidated_coarse_ids, num_consolidated_neighbors,
                         destination_part, my_destination_part, is_root_partition,
                         fine_parts_to_consolidate, num_fine_parts_to_consolidate,
                         num_parts, my_id, total_rows_in_merged, num_all_aggregates, comms);
}
// Device-vector overload: thin forwarder to the shared consAndRenumberHalos
// implementation (templated on the aggregates vector type).
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateAndRenumberHalos(IVector_d &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_ids, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms)
{
    consAndRenumberHalos(aggregates, manager_halo_offsets, halo_offsets, neighbors,
                         num_fine_neighbors, consolidated_coarse_ids, num_consolidated_neighbors,
                         destination_part, my_destination_part, is_root_partition,
                         fine_parts_to_consolidate, num_fine_parts_to_consolidate,
                         num_parts, my_id, total_rows_in_merged, num_all_aggregates, comms);
}
template <class TConfig>
template <class IVector_hd>
void DistributedManagerBase<TConfig>::consAndRenumberHalos(IVector_hd &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_neigh_to_fine_part, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms)
{
/*
* EXAMPLE 2
This example is independent from the previous ones.
Say partition 0 and 1 are merging (into 0) partition 0 is neighbors with 1,2,3 and partition 1 is neighbors with 0,3,4
Partitions 3 and 4 are merging (into partition 3) and partition 2 is not merging with anyone.
This example details the renumbering of halo indices on partition 0 and partition 1.
aggregates on partition 0:
[(fine interior nodes) (fine boundary nodes) (fine halo from part 1) (fine halo from part 2) (fine halo from part 3)]
[(fine interior nodes) (fine boundary nodes) (13 13 15) (12 15 17) (14 16 18)]
aggregates on partition 1:
[(fine interior nodes) (fine boundary nodes) (fine halo from part 0) (fine halo from part 3) (fine halo from part 4)]
[(fine interior nodes) (fine boundary nodes) (14 16 17) (18 19 19) (15 15 17)]
manager_halo_offsets on partition 0:
[22 25 28 31]
manager_halo_offsets on partition 1:
[20 23 26 29]
halo_offsets on both partitions are uninitialised: [0 0 0] and [0 0]
neighbors on partition 0: [1 2 3] partition 1: [0 3 4]
num_fine_neighbors partition 0: 3 partition 1: 3
consolidated_coarse_neigh_to_fine_part partition 0: [2 3] partition 1: [3]
num_consolidated_neighbors partition 0: 2 partition 1: 1
destination_part [0 0 2 3 3]
my_destination_part partition 0: 0 partition 1: 0
is_root_partition partition 0: true partition 1: false
fine_parts_to_consolidate partition 0: [0 1]
num_fine_parts_to_consolidate partition 0: 2
num_parts 5
my_id partition 0: 0 partition 1: 1
total_rows_in_merged partition 0 and 1: 24 (=sum of the two below)
num_all_aggregates partition partition 0: 13 partition 1: 11 - will be updated with the number of halo aggregates
*/
//
// Step 9.2 - com up with nonmerge lists
//
int num_fine_nonmerge_neighbors;// = fine_nonmerge_neighbors.size();
//NUmber of neighbors we are not merging with
num_fine_nonmerge_neighbors = 0;
for (int i = 0 ; i < num_fine_neighbors; i++)
{
if (destination_part[neighbors[i]] != my_destination_part)
{
num_fine_nonmerge_neighbors++;
}
}
IVector_h halo_sizes(num_fine_nonmerge_neighbors);
IVector_h fine_nonmerge_neigh_to_cons_fine_part(num_fine_nonmerge_neighbors);
IVector_h fine_nonmerge_neighbor_to_fine_neighbor(num_fine_nonmerge_neighbors);
num_fine_nonmerge_neighbors = 0;
for (int i = 0 ; i < num_fine_neighbors; i++)
{
if (destination_part[neighbors[i]] != my_destination_part)
{
halo_sizes[num_fine_nonmerge_neighbors] = manager_halo_offsets[i + 1] - manager_halo_offsets[i];
fine_nonmerge_neighbor_to_fine_neighbor[num_fine_nonmerge_neighbors] = i;
fine_nonmerge_neigh_to_cons_fine_part[num_fine_nonmerge_neighbors] = destination_part[neighbors[i]];
num_fine_nonmerge_neighbors++;
}
}
/*
* EXAMPLE 2
num_fine_nonmerge_neighbors partition 0: 2 partition 1: 2
fine_nonmerge_neighbor_to_fine_neighbor partition 0: [1 2] partition 1: [1 2]
fine_nonmerge_neigh_to_cons_fine_part partition 0: [2 3] partition 1: [3 3]
halo_sizes partition 0: [3 3] partition 1: [3 3]
*/
//Send them to root along with the halo parts of the aggregates vector
if (!is_root_partition)
{
IVector_h num_fine_nonmerge_neigh(1);
num_fine_nonmerge_neigh[0] = num_fine_nonmerge_neighbors;
// TODO: async? might be faster.
comms->send_vector(num_fine_nonmerge_neigh, my_destination_part, 1111);
comms->send_vector(halo_sizes, my_destination_part, 2222);
comms->send_vector(fine_nonmerge_neigh_to_cons_fine_part, my_destination_part, 3333);
// Here check l2h_identity flag and act accordingly
for (int i = 0; i < num_fine_nonmerge_neighbors; i++)
{
comms->send_vector_async(aggregates, my_destination_part, 4444 + i, manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[i]], halo_sizes[i]) ;
}
//comms->send_vector_wait_all(num_fine_nonmerge_neigh);
//comms->send_vector_wait_all(halo_sizes);
//comms->send_vector_wait_all(fine_nonmerge_neigh_to_cons_fine_part);
comms->send_vector_wait_all(aggregates);
/*
* EXAMPLE 2
Partition 1 sends to partition 0:
num_fine_nonmerge_neigh 2
halo_sizes [3 3]
fine_nonmerge_neigh_to_cons_fine_part [3 3]
for loop: sends two pieces: [(18 19 19)] [(15 15 17)]
*/
}
if (is_root_partition)
{
//
// Step 9.3 Root receives this info, creates metadata
//
std::vector<VecInt_t> num_fine_nonmerge_neigh_array(num_fine_parts_to_consolidate);
IVector_h_vector halo_sizes_array(num_fine_parts_to_consolidate);
IVector_h_vector fine_nonmerge_neigh_to_cons_fine_part_array(num_fine_parts_to_consolidate);
std::vector<std::vector<IVector> > fine_halo_aggregates_to_root_array(num_fine_parts_to_consolidate);
std::vector<VecInt_t> min_index_coarse_halo(num_consolidated_neighbors, 0x7FFFFFFF);
std::vector<VecInt_t> max_index_coarse_halo(num_consolidated_neighbors, 0);
std::vector<VecInt_t> fine_part_to_consolidated_neighbor(num_parts, -1);
for (int i = 0; i < num_consolidated_neighbors; i++)
{
fine_part_to_consolidated_neighbor[consolidated_coarse_neigh_to_fine_part[i]] = i;
}
/*
* EXAMPLE 2
everything from here on is for partition 0, since that is the root partition
fine_part_to_consolidated_neighbor [-1 -1 0 1 -1]
*/
for (int i = 0; i < num_fine_parts_to_consolidate; i++)
{
int current_part = fine_parts_to_consolidate[i];
IVector_h temp(1);
if (current_part != my_id)
{
comms->recv_vector(temp, current_part, 1111);
}
else
{
temp[0] = num_fine_nonmerge_neighbors;
}
num_fine_nonmerge_neigh_array[i] = temp[0];
halo_sizes_array[i].resize(temp[0]);
fine_nonmerge_neigh_to_cons_fine_part_array[i].resize(temp[0]);
fine_halo_aggregates_to_root_array[i].resize(temp[0]);
if (current_part != my_id)
{
comms->recv_vector(halo_sizes_array[i], current_part, 2222);
}
else
{
halo_sizes_array[i] = halo_sizes;
}
if (current_part != my_id)
{
comms->recv_vector(fine_nonmerge_neigh_to_cons_fine_part_array[i], current_part, 3333);
}
else
{
fine_nonmerge_neigh_to_cons_fine_part_array[i] = fine_nonmerge_neigh_to_cons_fine_part;
}
//Receive the halo regions
for (int j = 0; j < temp[0]; j++)
{
fine_halo_aggregates_to_root_array[i][j].resize(halo_sizes_array[i][j]);
if (current_part != my_id)
{
comms->recv_vector(fine_halo_aggregates_to_root_array[i][j], current_part, 4444 + j);
}
else
{
//HERE
thrust::copy(aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[j]],
aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[j]] + halo_sizes[j],
fine_halo_aggregates_to_root_array[i][j].begin()); //TODO: not do this copying around on the root
}
#define MIN(a,b) a<b?a:b;
#define MAX(a,b) a>b?a:b;
//Find minimum and maximum halo indices as not to allocate too much scratch space later
int min_index = thrust::reduce(fine_halo_aggregates_to_root_array[i][j].begin(), fine_halo_aggregates_to_root_array[i][j].end(), int(0x7FFFFFFF), thrust::minimum<int>());
int max_index = thrust::reduce(fine_halo_aggregates_to_root_array[i][j].begin(), fine_halo_aggregates_to_root_array[i][j].end(), int(0), thrust::maximum<int>());
min_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]] = MIN((int)min_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]], min_index);
max_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]] = MAX((int)max_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]], max_index);
}
}
cudaCheckError();
/*
* EXAMPLE 2
num_fine_nonmerge_neigh_array = [2 2]
halo_sizes_array = [[3 3][3 3]]
fine_nonmerge_neigh_to_cons_fine_part_array[][] = [[2 3][3 3]]
fine_halo_aggregates_to_root_array[from][to][fine halo vertex] [[[12 15 17][14 16 18]]
[[18 19 19][15 15 17]]]
min_index_coarse_halo[12 14]
max_index_coarse_halo[17 19]
*/
halo_offsets[0] = total_rows_in_merged;
//Now we have all the halo nodes, let's renumber them.
int min_index = thrust::reduce(min_index_coarse_halo.begin(), min_index_coarse_halo.end(), int(0x7FFFFFFF), thrust::minimum<int>());
int max_index = thrust::reduce(max_index_coarse_halo.begin(), max_index_coarse_halo.end(), int(0), thrust::maximum<int>());
cudaCheckError();
//
// Step 9.4 compute halo indices on root nodes
//
int scratch_size;
if (num_consolidated_neighbors == 0)
{
scratch_size = 1;
}
else
{
scratch_size = max_index - min_index + 2;
}
IVector scratch(scratch_size);
for (int i = 0; i < num_consolidated_neighbors; i++)
{
thrust::fill(scratch.begin(), scratch.end(), 0);
int dest_part = consolidated_coarse_neigh_to_fine_part[i];
//Flag halo indices that occur for a specific coarse neighbor
for (int j = 0; j < num_fine_parts_to_consolidate; j++)
{
for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++)
{
if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part)
{
int size = halo_sizes_array[j][k];
this->flag_halo_ids(size, scratch, fine_halo_aggregates_to_root_array[j][k], min_index_coarse_halo[i], max_index, min_index);
}
}
}
thrust::exclusive_scan(scratch.begin(), scratch.end(), scratch.begin(), halo_offsets[i]); //renumber them with the proper offset into our halo
halo_offsets[i + 1] = scratch[scratch.size() - 1];
//now read them back
for (int j = 0; j < num_fine_parts_to_consolidate; j++)
{
for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++)
{
if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part)
{
int size = halo_sizes_array[j][k];
int block_size = 128;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1);
this->read_halo_ids(size, scratch, fine_halo_aggregates_to_root_array[j][k], min_index_coarse_halo[i]);
//and send them back to contributing partitions
hipDeviceSynchronize(); //TODO: don't need to synchronize when using GPUDirect
int current_part = fine_parts_to_consolidate[j];
int tag = 4444 + dest_part;
if (current_part != my_id)
{
comms->send_vector_async(fine_halo_aggregates_to_root_array[j][k], current_part, tag); //!!!!: we are sending them back not in sequential order, need tags!!!!
}
else
{
thrust::copy(fine_halo_aggregates_to_root_array[j][k].begin(), fine_halo_aggregates_to_root_array[j][k].end(), aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[k]]);
}
}
}
}
/*
* EXAMPLE 2
the array that is sent back in pieces:
fine_halo_aggregates_to_root_array[from][to][fine halo vertex] [[[24 25 26][27 29 31]]
[[31 32 32][28 28 30]]]
halo_offsets = [24 27 33]
*/
} // Loop over consolidated neighbors
cudaCheckError();
// Wait for sends to have completed (this is to prevent fine_halo_aggregates_to_root_array to be destroyed before send has finished)
for (int i = 0; i < num_consolidated_neighbors; i++)
{
int dest_part = consolidated_coarse_neigh_to_fine_part[i];
for (int j = 0; j < num_fine_parts_to_consolidate; j++)
{
for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++)
{
if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part)
{
int current_part = fine_parts_to_consolidate[j];
if (current_part != my_id)
{
comms->send_vector_wait_all(fine_halo_aggregates_to_root_array[j][k]);
}
}
}
}
} // Loop over consolidated neighbors
//Send total number of rows in the aggregated matrix
for (int i = 0; i < num_fine_parts_to_consolidate; i++)
{
int current_part = fine_parts_to_consolidate[i];
IVector_h total_rows(1);
total_rows[0] = halo_offsets[num_consolidated_neighbors];
if (current_part != my_id)
{
comms->send_vector(total_rows, current_part, 5555);
}
else
{
num_all_aggregates = total_rows[0];
}
}
} // If is root partition
if (!is_root_partition)
{
for (int i = 0; i < num_fine_nonmerge_neighbors; i++)
{
int tag = 4444 + fine_nonmerge_neigh_to_cons_fine_part[i];
comms->recv_vector(aggregates, my_destination_part, tag, manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[i]], halo_sizes[i]);
}
IVector_h total_rows(1);
comms->recv_vector(total_rows, my_destination_part, 5555);
num_all_aggregates = total_rows[0];
}
/*
* EXAMPLE 2
num_all_aggregates = 33 (both partitions 0 and 1
*/
}
template <class TConfig>
void DistributedManagerBase<TConfig>::ipcExchangePtr(void *&ptr, bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_root_partition, int my_id, DistributedComms<TConfig> *comms)
{
    // Share a root rank's device allocation with its children via CUDA/HIP IPC:
    // the root exports a memory handle for 'ptr' and sends it to every child;
    // each child receives the handle and remaps 'ptr' onto the root's buffer.
    hipIpcMemHandle_t handle;

    if (!is_root_partition)
    {
        // Child: fetch the handle from our root and open it; 'ptr' now aliases
        // the root's allocation.
        comms->recv_raw_data(&handle, sizeof(handle), my_root_partition, 456);
        hipError_t err = hipIpcOpenMemHandle( (void **) &ptr, handle, hipIpcMemLazyEnablePeerAccess);
    }
    else
    {
        // Root: export the allocation and broadcast the handle to all children
        // (skipping ourselves).
        hipIpcGetMemHandle( (hipIpcMemHandle_t *) &handle, ptr ) ;

        for (int idx = 0; idx < num_parts_to_consolidate; idx++)
        {
            int part = parts_to_consolidate[idx];

            if (part == my_id) { continue; }

            comms->send_raw_data(&handle, sizeof(handle), part, 456);
        }
    }
}
template <class TConfig>
void DistributedManagerBase<TConfig>::ipcWaitForChildren(bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_destination_part, int my_id, DistributedComms<TConfig> *comms)
{
    // Barrier-like handshake: every child records an interprocess event and
    // ships its IPC event handle to the root; the root opens each handle and
    // blocks until all children's recorded GPU work has completed.
    hipEvent_t event;
    hipIpcEventHandle_t event_handle;
    // BUGFIX: hipEventCreate takes no flags argument — interprocess events must
    // be created with hipEventCreateWithFlags (hipEventInterprocess also
    // requires hipEventDisableTiming).
    hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventInterprocess);
    hipIpcGetEventHandle( &event_handle, event);
    // Each rank records the event (default stream).
    hipEventRecord(event);

    if (is_root_partition)
    {
        std::vector<hipEvent_t> child_events(num_parts_to_consolidate);
        std::vector<hipIpcEventHandle_t> child_event_handles(num_parts_to_consolidate);

        // Root partition receives event_handles from each child (skipping
        // itself) and opens them.
        for (int i = 0; i < num_parts_to_consolidate; i++)
        {
            int current_part = parts_to_consolidate[i];

            if (current_part != my_id)
            {
                comms->recv_raw_data(&(child_event_handles[i]), sizeof(hipIpcEventHandle_t), current_part, 987 + current_part);
                hipIpcOpenEventHandle(&child_events[i], child_event_handles[i]);
            }
        }

        // Wait for every child's recorded event to complete.
        for (int i = 0; i < num_parts_to_consolidate; i++)
        {
            if (parts_to_consolidate[i] != my_id)
            {
                hipEventSynchronize(child_events[i]);
            }
        }

        // NOTE(review): neither the opened child events nor the local event are
        // destroyed here; presumably reclaimed at process teardown — confirm.
    }
    else
    {
        comms->send_raw_data(&event_handle, sizeof(hipIpcEventHandle_t), my_destination_part, 987 + my_id);
    }
}
template <class TConfig>
void DistributedManagerBase<TConfig>::ipcWaitForRoot(bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_destination_part, int my_id, DistributedComms<TConfig> *comms)
{
    // Inverse of ipcWaitForChildren: the root records an interprocess event and
    // broadcasts its IPC handle; every child opens the handle and blocks until
    // the root's recorded GPU work has completed.
    hipEvent_t event;
    hipIpcEventHandle_t event_handle;

    if (is_root_partition)
    {
        // BUGFIX: flags must be passed through hipEventCreateWithFlags
        // (hipEventCreate takes no flags argument). The event is now created
        // only on the root: children obtain theirs via hipIpcOpenEventHandle,
        // so the previous unconditional creation leaked the child's local event.
        hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventInterprocess);
        hipIpcGetEventHandle( &event_handle, event);
        // Root records the event
        hipEventRecord(event);

        // Root partition sends event_handles to each child (skipping itself).
        for (int i = 0; i < num_parts_to_consolidate; i++)
        {
            int current_part = parts_to_consolidate[i];

            if (current_part != my_id)
            {
                comms->send_raw_data(&event_handle, sizeof(event_handle), current_part, 988 + current_part);
            }
        }
    }
    else
    {
        comms->recv_raw_data(&event_handle, sizeof(event_handle), my_destination_part, 988 + my_id);
        hipIpcOpenEventHandle(&event, event_handle);
        hipEventSynchronize(event);
    }
}
template <class TConfig>
void DistributedManagerBase<TConfig>::read_halo_ids(int size, IVector_d &scratch, IVector_d &halo_aggregates, VecInt_t min_index_coarse_halo)
{
    // Device path: gather the renumbered halo ids from the scratch table back
    // into halo_aggregates via a 1D kernel launch over 'size' entries.
    const int cta_size = 128;
    const int n_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / cta_size + 1 );
    hipLaunchKernelGGL(( read_halo_ids_kernel) , dim3(n_blocks), dim3(cta_size), 0, 0, scratch.raw(), halo_aggregates.raw(), min_index_coarse_halo, size);
    cudaCheckError();
}
template <class TConfig>
// Host-vector overload: only the device path is implemented; calling this
// always raises a fatal error.
void DistributedManagerBase<TConfig>::read_halo_ids(int size, IVector_h &scratch, IVector_h &halo_aggregates, VecInt_t min_index_coarse_halo)
{
FatalError("read_halo_ids not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED);
}
template <class TConfig>
void DistributedManagerBase<TConfig>::flag_halo_ids(int size, IVector_d &scratch, IVector_d &halo_aggregates, VecInt_t min_index_coarse_halo, int max_index, int min_index)
{
    // Device path: mark, in the scratch table, which halo aggregate ids occur
    // in halo_aggregates; the table spans [min_index, max_index] (inclusive),
    // hence the width of max_index - min_index + 1 passed to the kernel.
    const int cta_size = 128;
    const int n_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / cta_size + 1 );
    hipLaunchKernelGGL(( flag_halo_ids_kernel) , dim3(n_blocks), dim3(cta_size), 0, 0, scratch.raw(), halo_aggregates.raw(), min_index_coarse_halo, size, max_index - min_index + 1);
    cudaCheckError();
}
template <class TConfig>
// Host-vector overload: only the device path is implemented; calling this
// always raises a fatal error.
void DistributedManagerBase<TConfig>::flag_halo_ids(int size, IVector_h &scratch, IVector_h &halo_aggregates, VecInt_t min_index_coarse_halo, int max_index, int min_index)
{
FatalError("flag_halo_ids not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
// Host-backend specialization: fine-level consolidation is only supported on
// the device backend, so this always raises a fatal error.
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::consolidateAndUploadAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &A)
{
FatalError("Fine level consolidation not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED);
}
template<class TConfig>
// Propagate solve results (iteration count, optional residual history, and
// solver status) from each fine-level root partition to the ranks it
// consolidated. No-op unless fine-level consolidation is active.
// Message tags: 245 = num_iters, 246 = packed residual history, 247 = status.
void DistributedManagerBase<TConfig>::exchangeSolveResultsConsolidation(int &num_iters, std::vector<PODVector_h> &res_history, AMGX_STATUS &status, bool store_res_history)
{
int bsize = this->A->get_block_size();
PODVector_h res_history_tmp;
if (!m_is_fine_level_consolidated)
{
return;
}
else
{
int my_id = this->getFineLevelComms()->get_global_id();
IVector_h my_num_iters(1);
if (m_is_fine_level_root_partition)
{
my_num_iters[0] = num_iters;
if (store_res_history)
{
// Pack the res_history vector into array
// Flattened layout: entry (i, j) -> res_history_tmp[i * bsize + j],
// with num_iters + 1 entries (initial residual plus one per iteration).
res_history_tmp.resize( (num_iters + 1)*bsize);
for (int i = 0; i < num_iters + 1; i++)
{
for (int j = 0; j < bsize; j++)
{
res_history_tmp[i * bsize + j] = res_history[i][j];
}
}
}
// Async sends to every consolidated child (skipping self).
for (int i = 0; i < m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = m_fine_level_parts_to_consolidate[i];
if (my_id != current_part)
{
getFineLevelComms()->send_vector_async(my_num_iters, current_part, 245);
if (store_res_history)
{
getFineLevelComms()->send_vector_async(res_history_tmp, current_part, 246);
}
}
}
// Status is sent synchronously as raw bytes, in a separate pass.
for (int i = 0; i < m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = m_fine_level_parts_to_consolidate[i];
if (my_id != current_part)
{
getFineLevelComms()->send_raw_data(&status, sizeof(status), current_part, 247);
}
}
// Wait for the async sends before the local buffers go out of scope.
getFineLevelComms()->send_vector_wait_all(my_num_iters);
if (store_res_history)
{
getFineLevelComms()->send_vector_wait_all(res_history_tmp);
}
}
else
{
// Store num_iters
getFineLevelComms()->recv_vector(my_num_iters, m_my_fine_level_destination_part, 245);
num_iters = my_num_iters[0];
if (store_res_history)
{
// Fill res_history vector
// Unpack the flattened buffer back into per-iteration vectors.
res_history.resize(num_iters + 1);
res_history_tmp.resize( (num_iters + 1)*bsize);
getFineLevelComms()->recv_vector(res_history_tmp, m_my_fine_level_destination_part, 246);
for (int i = 0; i < num_iters + 1; i++)
{
res_history[i].resize(bsize);
for (int j = 0; j < bsize; j++)
{
res_history[i][j] = res_history_tmp[i * bsize + j];
}
}
}
getFineLevelComms()->recv_raw_data(&status, sizeof(status), m_my_fine_level_destination_part, 247);
}
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
// Device-backend fine-level consolidation: merges the local matrices of several
// MPI ranks onto designated root ranks (grouped by host/PCI-E slot) and uploads
// the merged matrix to the root's GPU. Outline, as implemented below:
//   1. compute destination (root) partition per rank; fall back to plain
//      uploadAll when every rank is its own root;
//   2. merge B2L maps by destination partition and renumber interior/boundary
//      rows into the merged index space;
//   3. exchange renumbered boundary/halo ids with neighbors and consolidate
//      them on the roots;
//   4. transfer row lengths, column indices and values to the root either via
//      CUDA/HIP IPC (zero-copy kernels) or via MPI + pinned staging buffers;
//   5. roots finalize the merged matrix (exclusive scan of row offsets, halo
//      diagonal rows, diagonal computation) and a consolidated sub-communicator
//      replaces this->_comms.
// Non-root ranks end with empty neighbor/halo metadata; child-size bookkeeping
// (m_child_*) is retained for later coefficient replacement.
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::consolidateAndUploadAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A)
{
this->A = &in_A;
this->createComms(this->A->getResources()); //refresh comms
DistributedComms<TConfig> *comms = this->getComms();
int my_id = comms->get_global_id();
int num_parts = comms->get_num_partitions();
int num_rings = this->num_halo_rings();
int num_neighbors = this->neighbors.size();
// All partitions have to call this, otherwise it fails
// Step 1: Figure out which partition should be consolidated together based on their host_name and their PCI-E slot ID
IVector_h destination_part(num_parts);
this->computeDestinationPartitionsWithCons(my_id, num_parts, destination_part, comms);
int my_destination_part = destination_part[my_id];
// Check if I'm root partition and how many msgs I will receive
bool is_root_partition = false;
int num_parts_to_consolidate = 0;
for (int i = 0; i < num_parts; i++)
{
if (destination_part[i] == my_id)
{
is_root_partition = true;
num_parts_to_consolidate++;
}
}
if (my_destination_part >= num_parts)
{
FatalError("During consolidation, sending data to partition that doesn't exist", AMGX_ERR_NOT_IMPLEMENTED);
}
// Create cons_part_to_part map
// Sorted, deduplicated list of root partitions.
IVector_h cons_part_to_part = destination_part;
thrust::sort(cons_part_to_part.begin(), cons_part_to_part.end());
cudaCheckError();
cons_part_to_part.erase(thrust::unique(cons_part_to_part.begin(), cons_part_to_part.end()), cons_part_to_part.end());
cudaCheckError();
int num_cons_partitions = cons_part_to_part.size();
// If number of consolidated partitions is the same as number of partitions, simply call uploadAll
if (num_cons_partitions == num_parts)
{
this->initializeUploadReorderAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A));
this->m_is_fine_level_consolidated = false;
return;
}
if (is_root_partition)
{
this->A->getResources()->expandRootPool();
}
this->m_is_fine_level_consolidated = true;
if (num_rings != 1)
{
FatalError("num_rings > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED);
}
// Fill with b2l_maps
IVector_h_vector B2L_maps_tmp;
B2L_maps_tmp.resize(num_neighbors);
for (int i = 0; i < num_neighbors; i++)
{
B2L_maps_tmp[i] = this->cached_B2L_maps[i];
}
bool useCudaIpc = this->m_use_cuda_ipc_consolidation;
mat_value_type *data_hd = NULL;
mat_value_type *diag_hd = NULL;
int *col_indices_hd = NULL;
int data_alloc = 0;
int diag_alloc = 0;
int col_alloc = 0;
// NOTE(review): the size passed for col_indices is nnz*block_dimx*block_dimy
// ints, while col_indices presumably holds nnz entries — confirm the
// over-estimate is intentional.
col_indices_hd = (int *) this->getDevicePointerForData((void *)col_indices, nnz * block_dimx * block_dimy * sizeof(int), &col_alloc);
data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data, nnz * block_dimx * block_dimy * sizeof(mat_value_type), &data_alloc);
if (diag != NULL)
{
diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag, nnz * block_dimx * block_dimy * sizeof(mat_value_type), &diag_alloc);
}
// Copy the original row_offsets array (this is required when replacing coefficients
this->m_old_row_offsets_CONS.resize(n + 1);
hipMemcpy(this->m_old_row_offsets_CONS.raw(), row_ptrs, (n + 1)*sizeof(int), hipMemcpyDefault);
cudaCheckError();
this->m_old_nnz_CONS = nnz;
// This function:
// Creates fine level consolidated matrices
// Modifies the btl_maps, lth_maps
// Create part_to_cons_part map
IVector_h part_to_cons_part(num_parts);
thrust::lower_bound(cons_part_to_part.begin(), cons_part_to_part.end(), destination_part.begin(), destination_part.end(), part_to_cons_part.begin());
cudaCheckError();
IVector_h neigh_to_part;
this->createNeighToDestPartMap(neigh_to_part, this->neighbors, destination_part, num_neighbors);
IVector_h cons_neigh_to_part;
int num_cons_neighbors;
this->createConsolidatedNeighToPartMap(cons_neigh_to_part, neigh_to_part, my_destination_part, destination_part, num_cons_neighbors);
IVector_h neigh_to_cons_neigh;
this->createNeighToConsNeigh( neigh_to_cons_neigh, cons_neigh_to_part, neigh_to_part, my_destination_part, num_neighbors);
// ---------------------------------------
// MERGE B2L MAPS BASED ON DEST PARTITION
// ---------------------------------------
IVector_h_vector dest_B2L_maps;
this->consolidateB2Lmaps(dest_B2L_maps, B2L_maps_tmp, neigh_to_cons_neigh, num_cons_neighbors, num_neighbors);
// ------------------------------------
// Renumber interior and boundary rows
// ------------------------------------
int num_interior_rows;
int num_boundary_rows;
IVector_h renumbering;
this->createAggregatesRenumbering(renumbering, dest_B2L_maps, n, num_cons_neighbors, num_interior_rows, num_boundary_rows, num_rings);
// --------------------------------------------------
// Create list of destination parts to consolidate
// --------------------------------------------------
// Store whether or not this is a root partition on fine level
IVector_h parts_to_consolidate;
parts_to_consolidate.resize(num_parts_to_consolidate);
int count = 0;
for (int i = 0; i < num_parts; i++)
{
if (destination_part[i] == my_id)
{
parts_to_consolidate[count] = i;
count++;
}
}
// ---------------------------------------------------------------------
// Each partition computes its offset for its interior and boundary nodes
// ---------------------------------------------------------------------
IVector_h_vector vertex_counts;
int interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged;
int total_rows_in_merged;
this->computeConsolidatedOffsets(my_id, my_destination_part, is_root_partition, num_interior_rows, num_boundary_rows, vertex_counts, parts_to_consolidate, num_parts_to_consolidate, interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged, total_rows_in_merged, comms);
// -----------------------------------
// Each partition renumber it's rows
// -----------------------------------
int total_num_halos = 0;
// Pack new bdy_ids
for (int i = 0; i < num_neighbors; i++)
{
total_num_halos += this->cached_L2H_maps[i].size();
}
// row_ids maps each local row (and halo slot) to its row index in the merged
// matrix; -1 marks entries not yet assigned.
IVector_h row_ids(n + total_num_halos, -1);
this->m_row_ids_CONS.resize(n + total_num_halos);
// Renumber the interior and boundary rows
for (int i = 0; i < n; i++)
{
int new_id;
if (renumbering.size() == 0)
{
new_id = i;
}
else
{
new_id = renumbering[i];
}
new_id += ((new_id >= num_interior_rows) ? boundary_offset : interior_offset);
row_ids[i] = new_id;
}
// Shift the merged B2L maps into the merged boundary index range.
for (int i = 0; i < num_cons_neighbors; i++)
{
thrust::transform(dest_B2L_maps[i].begin(),
dest_B2L_maps[i].end(),
thrust::constant_iterator<index_type>(boundary_offset),
dest_B2L_maps[i].begin(),
thrust::plus<index_type>());
}
cudaCheckError();
// -------------------------------------------------
// Send dest_B2L_maps to root partitions
// ------------------------------------------------
IVector_h num_bdy_per_cons_neigh(num_cons_neighbors);
for (int i = 0; i < num_cons_neighbors; i++)
{
num_bdy_per_cons_neigh[i] = dest_B2L_maps[i].size();
}
IVector_h root_cons_neighbors;
int root_num_cons_neighbors = 0;
IVector_h_vector cons_B2L_maps;
this->consolidateB2LmapsOnRoot(root_num_cons_neighbors, cons_B2L_maps, root_cons_neighbors, dest_B2L_maps, cons_neigh_to_part, num_bdy_per_cons_neigh, parts_to_consolidate, num_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_cons_neighbors, comms);
IVector_h halo_ids_offsets(num_neighbors + 1);
IVector_h halo_ids;
int halo_ids_size = 0;
halo_ids_offsets[0] = 0;
for (int i = 0; i < num_neighbors; i++)
{
halo_ids_size += this->cached_L2H_maps[i].size();
halo_ids_offsets[i + 1] = halo_ids_size;
}
halo_ids.resize(halo_ids_size);
// Do exchange with neighbors
// Pack new bdy_ids
// Each neighbor receives the merged row ids of our boundary rows it sees.
IVector_h_vector bdy_ids;
bdy_ids.resize(num_neighbors);
for (int i = 0; i < num_neighbors; i++)
{
int size = this->cached_B2L_maps[i].size();
bdy_ids[i].resize(size);
// Pack buffer
for (int j = 0; j < size; j++)
{
bdy_ids[i][j] = row_ids[this->cached_B2L_maps[i][j]];
}
}
for (int i = 0; i < num_neighbors; i++)
{
comms->send_vector_async(bdy_ids[i], this->neighbors[i], 6666 + this->neighbors[i]);
}
for (int i = 0; i < num_neighbors; i++)
{
comms->recv_vector(halo_ids, this->neighbors[i], 6666 + my_id, halo_ids_offsets[i], this->cached_L2H_maps[i].size());
}
for (int i = 0; i < num_neighbors; i++)
{
comms->send_vector_wait_all(bdy_ids[i]);
}
IVector_h halo_offsets(root_num_cons_neighbors + 1, 0);
int root_num_rows;
this->consolidateAndRenumberHalos(halo_ids, halo_ids_offsets, halo_offsets, this->neighbors, num_neighbors, root_cons_neighbors, root_num_cons_neighbors, destination_part, my_destination_part, is_root_partition, parts_to_consolidate, num_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, root_num_rows, comms);
if (is_root_partition)
{
this->B2L_maps.resize(cons_B2L_maps.size());
for (int i = 0; i < cons_B2L_maps.size(); i++)
{
thrust::sort(cons_B2L_maps[i].begin(), cons_B2L_maps[i].end());
this->B2L_maps[i].copy(cons_B2L_maps[i]); // H2D copy of B2L maps
}
cudaCheckError();
}
// Now renumber the row_ids based on lth_maps
count = 0;
for (int i = 0; i < num_neighbors; i++)
{
for (int j = 0; j < this->cached_L2H_maps[i].size(); j++)
{
row_ids[this->cached_L2H_maps[i][j]] = halo_ids[count];
count++;
}
}
hipMemcpy(this->m_row_ids_CONS.raw(), row_ids.raw(), (n + total_num_halos)*sizeof(int), hipMemcpyDefault);
cudaCheckError();
int bsize = block_dimx * block_dimy;
if (is_root_partition)
{
this->A->row_offsets.resize(root_num_rows + 1);
}
void *root_row_ptr = (void *) this->A->row_offsets.raw();
if (useCudaIpc)
{
// ----------------------------------------------------
// 1. cudaIPC to get pointer to root's row_offset array
// ----------------------------------------------------
this->ipcExchangePtr(root_row_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
cudaCheckError();
// -------------------------------------------------------------------
// 2. each rank copy it's row length on root partition using row_ids
// -------------------------------------------------------------------
int cta_size = 128;
int grid_size = min(4096, (n + total_num_halos + cta_size - 1) / cta_size);
hipLaunchKernelGGL(( zero_copy_row_lengths_ids_offsets<mat_value_type>) , dim3(grid_size), dim3(cta_size), 0, 0, this->m_old_row_offsets_CONS.raw(), ((int *) root_row_ptr) /* IPC */, this->m_row_ids_CONS.raw(), n, total_num_halos, (mat_value_type *) diag);
cudaCheckError();
// Root partition waits for children to be done writing their result
this->ipcWaitForChildren(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
cudaCheckError();
}
else // CudaIpcNotAvailable
{
// MPI path: children ship sizes, row offsets and row ids to the root,
// which writes row lengths into its own row_offsets array.
this->checkPinnedBuffer( max( nnz * sizeof(mat_value_type), (n + 1)*max(sizeof(index_type), sizeof(value_type)) ) );
if (!is_root_partition)
{
IVector_h data_to_send(3);
data_to_send[0] = n;
data_to_send[1] = nnz;
data_to_send[2] = total_num_halos;
int dummy;
void *row_ptrs_to_send = this->getHostPointerForData((void *)row_ptrs, sizeof(index_type) * (n + 1), &dummy);
comms->send_vector(data_to_send, my_destination_part, 10000 + my_id);
comms->send_raw_data(row_ptrs_to_send, (n + 1)*sizeof(int), my_destination_part, 10001 + my_id);
comms->send_raw_data(&row_ids[0], (n + total_num_halos)*sizeof(int), my_destination_part, 10002 + my_id);
}
else
{
hipEvent_t event;
hipEventCreate(&event);
//TODO: Could use streams here
//TODO: Avoid extra device to host copies
std::vector<IVector_h> data_recv(num_parts_to_consolidate);
for (int i = 0; i < num_parts_to_consolidate; i++)
{
data_recv[i].resize(3);
int current_part = parts_to_consolidate[i];
if (current_part != my_id)
{
comms->recv_vector(data_recv[i], current_part, 10000 + current_part);
}
else
{
data_recv[i][0] = n;
data_recv[i][1] = nnz;
data_recv[i][2] = total_num_halos;
}
}
this->m_child_n.resize(num_parts_to_consolidate);
this->m_child_nnz.resize(num_parts_to_consolidate);
this->m_child_num_halos.resize(num_parts_to_consolidate);
this->m_child_row_ids.resize(num_parts_to_consolidate);
this->m_child_old_row_offsets.resize(num_parts_to_consolidate);
int max_n = 0;
int max_nnz = 0;
for (int i = 0; i < num_parts_to_consolidate; i++)
{
int current_part = parts_to_consolidate[i];
this->m_child_n[i] = data_recv[i][0];
this->m_child_nnz[i] = data_recv[i][1];
this->m_child_num_halos[i] = data_recv[i][2];
if (this->m_child_n[i] > max_n) { max_n = this->m_child_n[i]; }
if (this->m_child_nnz[i] > max_nnz) { max_nnz = this->m_child_nnz[i]; }
this->m_child_row_ids[i].resize(this->m_child_n[i] + this->m_child_num_halos[i]);
this->m_child_old_row_offsets[i].resize(this->m_child_n[i] + 1);
}
this->m_child_max_n = max_n;
this->m_child_max_nnz = max_nnz;
for (int i = 0; i < num_parts_to_consolidate; i++)
{
int current_part = parts_to_consolidate[i];
int cta_size = 128;
int grid_size = min(4096, (this->m_child_n[i] + this->m_child_num_halos[i] + cta_size - 1) / cta_size);
if (current_part != my_id)
{
comms->recv_vector(this->m_child_old_row_offsets[i], current_part, 10001 + current_part, 0, this->m_child_n[i] + 1);
comms->recv_vector(this->m_child_row_ids[i], current_part, 10002 + current_part, 0, this->m_child_n[i] + this->m_child_num_halos[i]);
hipLaunchKernelGGL(( zero_copy_row_lengths_ids_offsets<mat_value_type>) , dim3(grid_size), dim3(cta_size), 0, 0, this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), this->m_child_row_ids[i].raw(), this->m_child_n[i], this->m_child_num_halos[i], (mat_value_type *) diag);
// Wait for kernel to finish before overwriting host buffer
hipEventRecord(event);
hipEventSynchronize(event);
}
else
{
hipLaunchKernelGGL(( zero_copy_row_lengths_ids_offsets<mat_value_type>) , dim3(grid_size), dim3(cta_size), 0, 0, this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), this->m_row_ids_CONS.raw(), n, total_num_halos, (mat_value_type *) diag);
hipEventRecord(event);
hipEventSynchronize(event);
}
}
cudaCheckError();
hipEventDestroy(event);
} // If root partition
//TODO: is this necessary
comms->barrier();
}
//3. root does a exclusive_scan
if (is_root_partition)
{
hipEvent_t event;
hipEventCreate(&event);
// Populate the halo rows with diagonal, increase the length of the halo rows
thrust::fill(this->A->row_offsets.begin() + halo_offsets[0], this->A->row_offsets.begin() + halo_offsets[root_num_cons_neighbors], 1);
thrust_wrapper::exclusive_scan(this->A->row_offsets.begin(), this->A->row_offsets.end(), this->A->row_offsets.begin());
hipEventRecord(event);
hipEventSynchronize(event);
cudaCheckError();
this->A->set_initialized(0);
this->A->delProps(DIAG); // We always insert the diagonal
this->A->delProps(COO); // No COO
this->A->setColsReorderedByColor(false); // Cols not reordered by color
int nnz = this->A->row_offsets[root_num_rows]; // This is a device to host copy
this->A->resize(root_num_rows, root_num_rows, nnz, block_dimx, block_dimy);
this->A->set_num_nz(nnz); // num_nz doesn't include halo rows
//this->A->set_initialized(1);
hipEventDestroy(event);
}
else
{
// Non-root ranks keep an empty matrix with matching block dimensions.
this->A->set_initialized(0);
this->A->resize( 0, 0, 0, block_dimx, block_dimy );
this->A->delProps(DIAG); // We always insert the diagonal
this->A->delProps(COO); // No COO
this->A->setColsReorderedByColor(false); // Cols not reordered by color
//this->A->set_initialized(1);
}
if (useCudaIpc)
{
// ----------------------------------------------
// 4. Do ipc consolidation of values and columns
// ----------------------------------------------
// Child partition waits for parent to create row_offsets
this->ipcWaitForRoot(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
void *root_col_ptr = (void *) this->A->col_indices.raw();
void *root_val_ptr = (void *) this->A->values.raw();
this->ipcExchangePtr(root_col_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
this->ipcExchangePtr(root_val_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
int cta_size2 = 128;
int grid_size2 = min(4096, (n + cta_size2 - 1) / cta_size2);
hipLaunchKernelGGL(( ipc_consolidation_upload_matrix<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, n, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), ( (int *) root_row_ptr ) /*IPC*/, col_indices_hd, ( (int *) root_col_ptr) /*IPC*/, data_hd, ( (mat_value_type *) root_val_ptr ) /*IPC*/, diag_hd, bsize);
cudaCheckError();
// Root partition waits for children to upload their matrices
this->ipcWaitForChildren(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
cudaCheckError();
// Child partitions close their mem handle (they are done upload data)
if (!is_root_partition)
{
hipIpcCloseMemHandle(root_row_ptr);
hipIpcCloseMemHandle(root_val_ptr);
hipIpcCloseMemHandle(root_col_ptr);
}
}
else // If cudaIpcNotAvailable
{
// MPI path: children ship raw columns/values/diag; the root stages them in
// mapped pinned buffers and scatters into its merged matrix on the GPU.
if (!is_root_partition)
{
int dummy;
void *col_indices_to_send = this->getHostPointerForData((void *)col_indices, sizeof(index_type) * nnz, &dummy);
comms->send_raw_data(col_indices_to_send, nnz * sizeof(int), my_destination_part, 10000 + my_id);
void *data_to_send = this->getHostPointerForData((void *)data, sizeof(mat_value_type) * nnz, &dummy);
comms->send_raw_data(data_to_send, nnz * bsize * sizeof(mat_value_type), my_destination_part, 10001 + my_id);
if (diag != NULL)
{
void *diag_to_send = this->getHostPointerForData((void *)diag, sizeof(mat_value_type) * n, &dummy);
comms->send_raw_data(diag_to_send, n * bsize * sizeof(mat_value_type), my_destination_part, 10002 + my_id);
}
}
else
{
hipEvent_t event;
hipEventCreate(&event);
//TODO: Could use streams here
int *child_col_indices;
mat_value_type *child_data;
mat_value_type *child_diag = NULL;
// Staging buffers sized for the largest child, reused across children.
hipHostMalloc( (void **) &child_col_indices, this->m_child_max_nnz * sizeof(int), hipHostMallocMapped);
hipHostMalloc( (void **) &child_data, this->m_child_max_nnz * bsize * sizeof(mat_value_type), hipHostMallocMapped);
if (diag != NULL)
{
hipHostMalloc( (void **) &child_diag, (this->m_child_max_n)*bsize * sizeof(mat_value_type), hipHostMallocMapped);
}
for (int i = 0; i < num_parts_to_consolidate; i++)
{
int current_part = parts_to_consolidate[i];
int cta_size2 = 128;
int grid_size2 = min(4096, (this->m_child_n[i] + cta_size2 - 1) / cta_size2);
if (current_part != my_id)
{
comms->recv_raw_data(child_col_indices, this->m_child_nnz[i]*sizeof(int), current_part, 10000 + current_part);
comms->recv_raw_data(child_data, this->m_child_nnz[i]*bsize * sizeof(mat_value_type), current_part, 10001 + current_part);
if (diag != NULL)
{
comms->recv_raw_data(child_diag, this->m_child_n[i]*bsize * sizeof(mat_value_type), current_part, 10002 + current_part);
}
int *child_col_indices_hd;
mat_value_type *child_data_hd;
mat_value_type *child_diag_hd = NULL;
hipHostGetDevicePointer(&child_col_indices_hd, child_col_indices, 0);
hipHostGetDevicePointer(&child_data_hd, child_data, 0);
if (diag != NULL)
{
hipHostGetDevicePointer(&child_diag_hd, child_diag, 0);
}
hipLaunchKernelGGL(( ipc_consolidation_upload_matrix<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, this->m_child_n[i], this->m_child_row_ids[i].raw(), this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), child_col_indices_hd, this->A->col_indices.raw(), child_data_hd, this->A->values.raw(), child_diag_hd, bsize);
// Wait for kernel to finish before overwriting host buffer
hipEventRecord(event);
hipEventSynchronize(event);
}
else
{
hipLaunchKernelGGL(( ipc_consolidation_upload_matrix<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, n, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), col_indices_hd, this->A->col_indices.raw(), data_hd, this->A->values.raw(), diag_hd, bsize);
hipEventRecord(event);
hipEventSynchronize(event);
}
}
cudaCheckError();
hipEventDestroy(event);
hipHostFree(child_col_indices);
hipHostFree(child_data);
if (diag != NULL)
{
hipHostFree(child_diag);
}
} // If root partition
//TODO: is this necessary
comms->barrier();
}
// Store the original fine level communicator
this->m_is_fine_level_root_partition = is_root_partition;
this->m_my_fine_level_destination_part = my_destination_part;
// Create a clone of the original communicator
this->m_fine_level_comms = comms; //this->_comms is the same pointer that this->m_fine_level_comms right now, so we can overwrite this->_comms, but make sure that we release m_fine_level_cons
this->_comms = this->m_fine_level_comms->CloneSubComm(cons_part_to_part, is_root_partition); // this->_comms will be empty comm for non-root partition and new comm for root ranks only if root partition
this->m_fine_level_id = my_id;
if (is_root_partition)
{
// Finalize the merged matrix on the root: fill halo rows, remap neighbor
// ids into consolidated numbering, and rebuild manager metadata.
int cta_size = 128;
int grid_size3 = min(4096, ( (root_num_rows - halo_offsets[0]) + cta_size - 1) / cta_size);
if (grid_size3 != 0)
{
hipLaunchKernelGGL(( set_halo_cols_values) , dim3(grid_size3), dim3(cta_size), 0, 0, this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), halo_offsets[0], root_num_rows, bsize);
cudaCheckError();
}
int my_cons_id = part_to_cons_part[my_id];
this->_global_id = my_cons_id;
this->_num_interior_nodes = total_interior_rows_in_merged;
this->_num_boundary_nodes = total_boundary_rows_in_merged;
for (int i = 0; i < root_num_cons_neighbors; i++)
{
root_cons_neighbors[i] = part_to_cons_part[root_cons_neighbors[i]];
}
this->_comms->set_neighbors(root_num_cons_neighbors);
this->neighbors = root_cons_neighbors;
this->halo_offsets = halo_offsets; // H2D copy of halo offsets
this->m_num_fine_level_parts_to_consolidate = num_parts_to_consolidate;
this->m_fine_level_parts_to_consolidate = parts_to_consolidate;
this->set_num_halo_rings(num_rings);
this->set_num_halo_rows(halo_offsets[root_num_cons_neighbors] - halo_offsets[0]);
// B2L_maps has already been copied
this->B2L_rings.resize(root_num_cons_neighbors);
for (int i = 0; i < root_num_cons_neighbors; i++)
{
this->B2L_rings[i].resize(2);
this->B2L_rings[i][0] = 0;
this->B2L_rings[i][1] = cons_B2L_maps[i].size();
}
this->set_initialized(this->A->row_offsets);
this->A->set_initialized(0);
this->A->delProps(DIAG);
this->A->diag.resize(root_num_rows);
this->A->computeDiagonal(); //
this->A->setView(OWNED);
hipEventCreate(&(this->comm_event));
this->A->set_initialized(1);
}
else
{
this->neighbors.resize(0);
this->halo_offsets.resize(0);
}
/* free memory (if needed) */
if (col_alloc) { hipFree(col_indices_hd); }
if (data_alloc) { hipFree(data_hd); }
if (diag_alloc) { hipFree(diag_hd); }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsNoCons(int n, int nnz, const mat_value_type *data_pinned, const mat_value_type *diag_pinned)
{
// Replace the numeric coefficients of the already-uploaded (and already
// renumbered) matrix, without consolidation: values are copied from the
// user's host arrays into this->A->values, permuted through
// this->renumbering so they land in the reordered row layout.
//   n           - number of owned rows (per the upload interface)
//   nnz         - number of nonzeros in data_pinned (see WARNING below)
//   data_pinned - host (possibly pinned) array of matrix values
//   diag_pinned - host array of diagonal values, or NULL when the diagonal
//                 is stored inline inside data_pinned
//matrix parameters
//int num_nnz = this->A->get_num_nz();
int num_rows = this->halo_offsets[0];
int total_rows = num_rows + this->num_halo_rows();
int block_size = this->A->get_block_size();
mat_value_type *data_hd = NULL;
mat_value_type *diag_hd = NULL;
int data_alloc = 0;
int diag_alloc = 0;
//cuda parameters
int num_blocks = min(4096, (num_rows + 127) / 128);
/* WARNING: the number of non-zero elements (nnz) in the array data_pinned and A->values (num_nnz) might be different at this point.
   1. If the matrix has CSR property and therefore diagonal is included in the matrix this values will be the same.
   2. If the matrix has DIAG property and therefore diagonal is originally stored separately, and later appended to the array
   of values, and subsequently inserted into the matrix than num_nnz = nnz + n. We have to account for this fact when replacing the
   coefficients (and use nnz not num_nnz).
   obs.: see calls to computeDiagonal (matrix.cu), AMGX_matrix_upload and AMGX_replace_coefficients (amgx_c.cu), and
   uploadMatrix and replaceMatrixCoefficients[No|With]Cons (distributed_manager.cu) for details. */
/* check early exit */
// Nothing to do for serial / un-renumbered managers (unless glued, where the
// renumbering lives on the root).
if ((this->neighbors.size() == 0 || this->renumbering.size() == 0) && !this->m_is_fine_level_glued)
{
return;
}
cudaCheckError();
/* allocate if data and diag if they are not pinned */
data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data_pinned, nnz * block_size * sizeof(mat_value_type), &data_alloc);
if (diag_pinned != NULL)
{
diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag_pinned, num_rows * block_size * sizeof(mat_value_type), &diag_alloc);
}
/* replace the values (reordering them if needed) */
if (insertDiagonals && diag_pinned != NULL)
{
// Diagonal entries are merged into the CSR values in the same pass.
hipLaunchKernelGGL(( replace_values_matrix <32>) , dim3(num_blocks), dim3(512), 0, 0, data_hd, diag_hd, this->old_row_offsets.raw(), this->A->row_offsets.raw(), this->A->values.raw(), this->renumbering.raw(), block_size, num_rows);
}
else
{
hipLaunchKernelGGL(( replace_values_matrix <32>) , dim3(num_blocks), dim3(512), 0, 0, data_hd, this->old_row_offsets.raw(), this->A->row_offsets.raw(), this->A->values.raw(), this->renumbering.raw(), block_size, num_rows);
if (diag_pinned != NULL)
{
// DIAG storage: diagonal values live after the last row's off-diagonal
// entries, reordered by the same renumbering vector.
hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, this->A->values.raw() + this->A->row_offsets[total_rows]*block_size, diag_hd, this->renumbering.raw(), block_size, num_rows);
}
}
cudaCheckError();
/* free memory (if needed) */
if (data_alloc) { hipFree(data_hd); }
if (diag_alloc) { hipFree(diag_hd); }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsWithCons(int n, int nnz, const mat_value_type *data_pinned, const mat_value_type *diag_pinned)
{
// Replace matrix coefficients when the fine level was consolidated onto root
// partitions. Three transports are used, in order of preference:
//   1. CUDA IPC: children write straight into the root's device arrays.
//   2. No IPC, aggregation path: children ship raw values over comms; root
//      copies them in via a mapped pinned staging buffer.
//   3. No IPC, classical ("glued") path: root gathers all values into one
//      host buffer and replays the non-consolidated replacement.
//   n / nnz      - owned rows / nonzeros of THIS (pre-consolidation) partition
//   data_pinned  - host array of matrix values
//   diag_pinned  - host array of diagonal values, or NULL
//matrix parameters
//int num_nnz = this->A->get_num_nz();
/* WARNING: in consolidation, for non-root partitions, halo_offsets
might be NULL due to the call halo_offsets.resize(0); at the end
of the routine uploadMatrix->consolidateAndUploadAll. We should
use the parameter n instead this->halo_offsets[0] for num_rows. */
int num_rows = n;
int block_size = this->A->get_block_size();
mat_value_type *data_hd = NULL;
mat_value_type *diag_hd = NULL;
int data_alloc = 0;
int diag_alloc = 0;
data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data_pinned, nnz * block_size * sizeof(mat_value_type), &data_alloc);
if (diag_pinned != NULL)
{
diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag_pinned, num_rows * block_size * sizeof(mat_value_type), &diag_alloc);
}
bool useCudaIpc = this->m_use_cuda_ipc_consolidation;
if (useCudaIpc)
{
// Child partitions wait for root to be done
this->ipcWaitForRoot(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
cudaCheckError();
// Exchange device pointers: non-root ranks receive IPC handles to the
// root's row_offsets / values arrays and write into them directly.
void *root_row_ptr = (void *) this->A->row_offsets.raw();
void *root_val_ptr = (void *) this->A->values.raw();
this->ipcExchangePtr(root_row_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
this->ipcExchangePtr(root_val_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
// replace the values, insert the diagonal
int ncons = this->m_old_row_offsets_CONS.size() - 1;
int cta_size = 128;
int grid_size2 = min(4096, (ncons + cta_size - 1) / cta_size);
hipLaunchKernelGGL(( ipc_consolidation_replace_values<mat_value_type>) , dim3(grid_size2), dim3(cta_size), 0, 0, ncons, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), ( (int *) root_row_ptr )/*IPC*/, data_hd, ( (mat_value_type *) root_val_ptr )/*IPC*/, diag_hd, this->A->get_block_size() );
cudaCheckError();
// Root partition wait for child to be done replacing their values
this->ipcWaitForChildren(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
cudaCheckError();
if (!this->m_is_fine_level_root_partition)
{
hipIpcCloseMemHandle(root_row_ptr);
hipIpcCloseMemHandle(root_val_ptr);
}
}
else // cudaIpcNotAvailable
{
if (this->m_is_fine_level_consolidated) // aggregation
{
int bsize = this->A->get_block_size();
int ncons = this->m_old_row_offsets_CONS.size() - 1;
if (!this->m_is_fine_level_root_partition)
{
// Non-root: ship raw values (and diagonal) to the destination root.
int dummy;
int nnzcons = this->m_old_nnz_CONS;
void *data_to_send = this->getHostPointerForData((void *)data_pinned, nnzcons * bsize * sizeof(mat_value_type), &dummy);
this->getFineLevelComms()->send_raw_data(data_to_send, nnzcons * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10001 + this->fine_level_id());
if (diag_pinned != NULL)
{
void *diag_to_send = this->getHostPointerForData((void *)diag_pinned, ncons * bsize * sizeof(mat_value_type), &dummy);
this->getFineLevelComms()->send_raw_data(diag_to_send, ncons * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10002 + this->fine_level_id());
}
}
else
{
hipEvent_t event;
hipEventCreate(&event);
//TODO: Could use streams here
// Mapped pinned staging buffers, sized for the largest child, reused
// for every child in turn (hence the event sync below before reuse).
mat_value_type *child_data;
mat_value_type *child_diag = NULL;
hipHostMalloc( (void **) &child_data, this->m_child_max_nnz * bsize * sizeof(mat_value_type), hipHostMallocMapped);
if (diag_pinned != NULL)
{
hipHostMalloc( (void **) &child_diag, (this->m_child_max_n)*bsize * sizeof(mat_value_type), hipHostMallocMapped);
}
for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = this->m_fine_level_parts_to_consolidate[i];
int cta_size2 = 128;
int grid_size2 = min(4096, (this->m_child_n[i] + cta_size2 - 1) / cta_size2);
if (current_part != this->fine_level_id())
{
this->getFineLevelComms()->recv_raw_data(child_data, this->m_child_nnz[i]*bsize * sizeof(mat_value_type), current_part, 10001 + current_part);
if (diag_pinned != NULL)
{
this->getFineLevelComms()->recv_raw_data(child_diag, this->m_child_n[i]*bsize * sizeof(mat_value_type), current_part, 10002 + current_part);
}
mat_value_type *child_data_hd;
mat_value_type *child_diag_hd = NULL;
hipHostGetDevicePointer(&child_data_hd, child_data, 0);
if (diag_pinned != NULL)
{
hipHostGetDevicePointer(&child_diag_hd, child_diag, 0);
}
hipLaunchKernelGGL(( ipc_consolidation_replace_values<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, this->m_child_n[i], this->m_child_row_ids[i].raw(), this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), child_data_hd, this->A->values.raw(), child_diag_hd, bsize);
// Wait for kernel to finish before overwriting host buffer
hipEventRecord(event);
hipEventSynchronize(event);
}
else
{
// Root's own contribution comes straight from data_hd/diag_hd;
// no staging buffer involved, so no event sync is needed.
hipLaunchKernelGGL(( ipc_consolidation_replace_values<mat_value_type>) , dim3(grid_size2), dim3(cta_size2), 0, 0, ncons, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), data_hd, this->A->values.raw(), diag_hd, bsize);
//hipEventRecord(event);
//hipEventSynchronize(event);
}
}
cudaCheckError();
hipEventDestroy(event);
hipHostFree(child_data);
if (diag_pinned != NULL)
{
hipHostFree(child_diag);
}
} // If root partition
//TODO: is this necessary
this->getFineLevelComms()->barrier();
} //agg
else if (this->m_is_fine_level_glued) // classical
{
int bsize = this->A->get_block_size();
int ncons = this->m_old_row_offsets_CONS.size() - 1;
// Gather every partition's nnz and build prefix offsets so the root can
// place each child's values at the right position in one big buffer.
IVector_h nnz_off;
nnz_off.resize(this->getConsolidationArrayOffsets().size());
IVector_h nnz_array;
nnz_array.resize(this->getConsolidationArrayOffsets().size() - 1);
this->getFineLevelComms()->all_gather( nnz,
nnz_array,
this->getConsolidationArrayOffsets().size() - 1);
nnz_off[0] = 0;
for (int i = 0; i < nnz_array.size(); i++)
{
nnz_off[i + 1] = nnz_off[i] + nnz_array[i];
}
if (!this->m_is_fine_level_root_partition)
{
int dummy;
void *data_to_send = this->getHostPointerForData((void *)data_pinned, nnz * bsize * sizeof(mat_value_type), &dummy);
this->getFineLevelComms()->send_raw_data( data_to_send,
nnz * bsize * sizeof(mat_value_type),
this->m_my_fine_level_destination_part,
10001 + this->fine_level_id());
if (diag_pinned != NULL)
{
void *diag_to_send = this->getHostPointerForData((void *)diag_pinned, n * sizeof(mat_value_type), &dummy);
this->getFineLevelComms()->send_raw_data( diag_to_send,
n * bsize * sizeof(mat_value_type),
this->m_my_fine_level_destination_part,
10002 + this->fine_level_id());
//diag.resize(0);
cudaCheckError();
}
//values.resize(0);
cudaCheckError();
}
else
{
//TODO: Could use streams here
mat_value_type *child_data;
mat_value_type *child_diag = NULL;
// Assumes partions have been glued already
this->A->getNnzForView(OWNED, &nnz);
hipHostMalloc( (void **) &child_data, nnz * bsize * sizeof(mat_value_type), hipHostMallocMapped);
if (diag_pinned != NULL)
{
hipHostMalloc( (void **) &child_diag, this->halo_offsets[this->neighbors.size()]*bsize * sizeof(mat_value_type), hipHostMallocMapped);
}
// roots copy their data
// NOTE(review): unlike the send/recv paths above, these memcpys do not
// multiply by bsize (and use value_type rather than mat_value_type) —
// presumably the glued path assumes block size 1; confirm before relying
// on it with blocked matrices.
memcpy ( &child_data[0], data_pinned, nnz_array[this->fine_level_id()]*sizeof(value_type));
if (diag_pinned != NULL)
{
memcpy ( &child_diag[0], diag_pinned, n * sizeof(value_type));
}
for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = this->m_fine_level_parts_to_consolidate[i];
int current_offset = nnz_off[current_part] - nnz_off[this->fine_level_id()] ;
int current_nnz = nnz_array[current_part];
if (current_part != this->fine_level_id())
{
this->getFineLevelComms()->recv_raw_data( &child_data[current_offset],
current_nnz * bsize * sizeof(mat_value_type),
current_part,
10001 + current_part);
if (diag_pinned != NULL)
this->getFineLevelComms()->recv_raw_data( &child_diag[this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()]],
(this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part])*bsize * sizeof(mat_value_type),
current_part,
10002 + current_part);
}
}
cudaCheckError();
// we can follow the usual upload path for raw data now
// Assumes partions have been glued already
int os;
this->A->getOffsetAndSizeForView(OWNED, &os, &n);
replaceMatrixCoefficientsNoCons( n, nnz, child_data, child_diag);
cudaCheckError();
hipHostFree(child_data);
if (diag_pinned != NULL)
{
hipHostFree(child_diag);
}
} // If root partition
//TODO: is this necessary
this->getFineLevelComms()->barrier();
} // cla
} // not ipc
this->A->setView(OWNED);
/* free memory (if needed) */
if (data_alloc) { hipFree(data_hd); }
if (diag_alloc) { hipFree(diag_hd); }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVector(VVector_v &v, const void *data, int n, int block_dim)
{
    // Upload a user vector to the device, permute it into the renumbered row
    // order, and populate its halo region. Consolidated (or glued-but-not-yet-
    // reglued) fine levels are delegated to the consolidation variant.
    const bool consolidation_path =
        this->isFineLevelConsolidated() || (this->isFineLevelGlued() && !this->isGlued());

    if (consolidation_path)
    {
        transformAndUploadVectorWithCons(v, data, n, block_dim);
        return;
    }

    // Plain path: copy the raw host data into v on the device.
    v.resize(n * block_dim);
    cudaCheckError();
    hipMemcpy(v.raw(), (value_type *)data, n * block_dim * sizeof(value_type), hipMemcpyDefault);
    cudaCheckError();
    // Permute entries to match the reordered matrix rows.
    transformVector(v);
    // Fetch halo entries from neighboring partitions.
    int tag = 0;
    this->exchange_halo(v, tag);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVectorWithCons(VVector_v &v, const void *data_pinned, int n, int block_dim)
{
// Consolidated upload of a vector: child partitions funnel their entries to
// the root partition (via CUDA IPC, raw comms for aggregation, or a host
// gather for the glued/classical path); the root then reorders the merged
// vector and exchanges halos.
//   v           - destination vector (root ends up owning the merged data)
//   data_pinned - this partition's host data
//   n           - number of owned rows on this (pre-consolidation) partition
//   block_dim   - entries per row
if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }
bool useCudaIpc = this->m_use_cuda_ipc_consolidation;
this->getFineLevelComms()->barrier();
void *root_temp_ptr = NULL;
VVector_v temp;
// Root (non-glued) allocates the merged vector, including halo space.
if (this->m_is_fine_level_root_partition && !this->m_is_fine_level_glued )
{
temp.resize(this->halo_offsets[this->neighbors.size()]*v.get_block_size(), types::util<value_type>::get_zero());
temp.set_block_dimx(v.get_block_dimx());
temp.set_block_dimy(v.get_block_dimy());
root_temp_ptr = (void *) temp.raw();
}
cudaCheckError();
int data_alloc = 0;
value_type *data_hd = NULL;
if (!this->m_is_fine_level_glued )
{
data_hd = (value_type *) this->getDevicePointerForData((void *)data_pinned, n * block_dim * sizeof(value_type), &data_alloc);
}
if (useCudaIpc)
{
// Do IPC
// Every rank scatters its own entries straight into the root's buffer.
this->ipcExchangePtr(root_temp_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
cudaCheckError();
int num_blocks = min(4096, (n + 511) / 512);
hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, (value_type *) root_temp_ptr, data_hd, this->m_row_ids_CONS.raw(), v.get_block_size(), n);
// Root partition waits for children to be done
this->ipcWaitForChildren(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
cudaCheckError();
if (!this->m_is_fine_level_root_partition)
{
hipIpcCloseMemHandle(root_temp_ptr);
}
}
else // If cudaIpcNotAvail
{
if (this->m_is_fine_level_consolidated) // aggregation
{
// Exchange the vector between root and child
if (!this->m_is_fine_level_root_partition)
{
// Child: announce size, then send raw entries to the root.
IVector_h size(1);
size[0] = n;
this->getFineLevelComms()->send_vector(size, this->m_my_fine_level_destination_part, 20000 + this->fine_level_id());
int dummy;
void *data_to_send = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy);
this->getFineLevelComms()->send_raw_data(data_to_send, n * v.get_block_size()*sizeof(value_type), this->m_my_fine_level_destination_part, 20001 + this->fine_level_id());
}
else
{
hipEvent_t event;
hipEventCreate(&event);
// Collect each child's row count and track the maximum for the
// shared staging buffer below.
IVector_h child_n(this->m_num_fine_level_parts_to_consolidate);
int max_n = 0;
for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = this->m_fine_level_parts_to_consolidate[i];
if (current_part != this->fine_level_id())
{
this->getFineLevelComms()->recv_vector(child_n, current_part, 20000 + current_part, i, 1);
}
else
{
child_n[i] = n;
}
if (child_n[i] > max_n) { max_n = child_n[i]; }
}
// One mapped pinned buffer reused per child (event sync before reuse).
value_type *child_data;
hipHostMalloc( (void **) &child_data, max_n * v.get_block_size()*sizeof(value_type), hipHostMallocMapped);
value_type *child_data_hd;
hipHostGetDevicePointer(&child_data_hd, child_data, 0);
for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = this->m_fine_level_parts_to_consolidate[i];
int num_blocks = min(4096, (child_n[i] + 511) / 512);
if (current_part != this->fine_level_id())
{
this->getFineLevelComms()->recv_raw_data(&child_data[0], child_n[i]*v.get_block_size()*sizeof(value_type), current_part, 20001 + current_part);
hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, (value_type *) root_temp_ptr, child_data_hd, this->m_child_row_ids[i].raw(), v.get_block_size(), child_n[i]);
hipEventRecord(event);
hipEventSynchronize(event);
cudaCheckError();
}
else
{
// Root's own entries come from data_hd, no staging needed.
hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, (value_type *) root_temp_ptr, data_hd, this->m_row_ids_CONS.raw(), v.get_block_size(), n);
}
} // Loop over parts to consolidate
cudaCheckError();
hipEventDestroy(event);
hipHostFree(child_data);
} // If root partition
} //agg
else if (this->m_is_fine_level_glued) // cla
{
value_type *child_data = NULL;
if (!this->m_is_fine_level_root_partition)
{
int dummy;
void *data_to_send = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy);
this->getFineLevelComms()->send_raw_data( data_to_send,
n * v.get_block_size()*sizeof(value_type),
this->m_my_fine_level_destination_part,
20001 + this->fine_level_id());
//v.resize(0); // just in case something resized it betwen iterations
cudaCheckError();
}
else
{
// Root gathers every child's entries into one host buffer, placed at
// offsets derived from the consolidation array offsets.
hipHostMalloc( (void **) &child_data, this->halo_offsets[this->neighbors.size()]*v.get_block_size()*sizeof(value_type), hipHostMallocMapped);
value_type *child_data_hd;
hipHostGetDevicePointer(&child_data_hd, child_data, 0);
// roots copy their data
int dummy;
void *my_data = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy);
memcpy ( &child_data[0], data_pinned, n * v.get_block_size()*sizeof(value_type));
// Loop over parts to consolidate
for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = this->m_fine_level_parts_to_consolidate[i];
if (current_part != this->fine_level_id())
{
this->getFineLevelComms()->recv_raw_data( &child_data[this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()]],
sizeof(value_type) * (this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part]),
current_part,
20001 + current_part );
}
}
// usual path
// Upload on host
hipMemcpy(v.raw(), (value_type *)child_data, v.size()* sizeof(value_type), hipMemcpyDefault);
cudaCheckError();
} // If root partition
// Permute based on renumbering vector
transformVector(v);
cudaCheckError();
// Exchange halos
int tag = 0;
this->exchange_halo(v, tag);
cudaCheckError();
v.set_unconsolidated_size(n);
// free host
if (child_data) { hipHostFree(child_data); }
cudaCheckError();
} //cla
} // If cudaIpcAvailable
if (!this->m_is_fine_level_glued) // not needed for classcical
{
// Non-glued: the merged vector lives in temp on the root; swap it into v
// and let the root perform the halo exchange.
if (this->m_is_fine_level_root_partition)
{
v.swap(temp);
int tag = 0;
// Root partitions do the exchange
this->exchange_halo(v, tag);
}
v.set_unconsolidated_size(n * v.get_block_size());
v.set_transformed();
}
/* free memory (if needed) */
if (data_alloc) { hipFree(data_hd); }
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformVector(VVector_v &v)
{
// Permute an owned-size device vector into the renumbered row order used by
// the reordered matrix, growing it to include halo space when needed.
if (this->neighbors.size() == 0) { return; }
else if (this->renumbering.size() == 0)
{
// No renumbering: just reserve halo space, contents stay in place.
v.resize(this->halo_offsets[this->neighbors.size()]*v.get_block_size());
return;
}
if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }
if (v.size() < this->halo_offsets[this->neighbors.size()]*v.get_block_size())
{
// v lacks halo space: reorder into a halo-sized temp, then swap it in.
VVector_v temp(this->halo_offsets[this->neighbors.size()]*v.get_block_size(), types::util<value_type>::get_zero());
temp.set_block_dimx(v.get_block_dimx());
temp.set_block_dimy(v.get_block_dimy());
if (v.size() < this->halo_offsets[0]*this->A->get_block_dimx())
{
FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED);
}
//reorder based on row permutation
int size = this->halo_offsets[0];
int num_blocks = min(4096, (size + 511) / 512);
hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size);
v.swap(temp);
}
else
{
// v already has halo space: reorder owned entries through a temp copy.
VVector_v temp(this->halo_offsets[0]*v.get_block_size());
int size = this->halo_offsets[0];
int num_blocks = min(4096, (size + 511) / 512);
hipLaunchKernelGGL(( reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size);
thrust::copy(temp.begin(), temp.end(), v.begin());
}
cudaCheckError();
v.set_transformed();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v)
{
    // Undo the row renumbering in place: scatter v's owned entries back to
    // their original positions and shrink v to owned size.
    if (this->neighbors.size() == 0 || this->renumbering.size() == 0) { return; }

    if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }

    // Scratch buffer sized to the owned rows only (halo entries are dropped).
    VVector_v scratch(this->halo_offsets[0]*this->A->get_block_dimx());

    if (v.size() < this->halo_offsets[0]*v.get_block_size())
    {
        FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED);
    }

    // Scatter values back through the inverse of the renumbering permutation.
    int owned_rows = this->halo_offsets[0];
    int grid = min(4096, (owned_rows + 511) / 512);
    hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(grid), dim3(512), 0, 0, scratch.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), owned_rows);
    cudaCheckError();

    // Shrink v and copy the un-permuted values back into it.
    v.resize(this->halo_offsets[0]*this->A->get_block_dimx());
    thrust::copy(scratch.begin(), scratch.end(), v.begin());
    cudaCheckError();
}
template <class TConfig>
void DistributedManagerBase<TConfig>::computeDestinationPartitions(INDEX_TYPE upper_threshold, float avg_size, const int num_parts, int &new_num_parts, bool &wantNeighbors)
{
    // Decide how fine partitions map onto a smaller set of coarse partitions.
    // Outputs: new_num_parts, m_destination_partitions, m_my_destination_part.
    m_destination_partitions.resize(num_parts);
    std::vector<int> round_robin(num_parts);

    if (avg_size < 1.f) { avg_size = 1.f; } // avoid floating point exception

    // Number of fine partitions to merge into one coarse partition.
    const int merge_factor = ( upper_threshold + (int) avg_size - 1) / ( (int) avg_size );
    new_num_parts = (num_parts + merge_factor - 1) / merge_factor;

    for (int p = 0; p < num_parts; p++)
    {
        round_robin[p] = p % new_num_parts;
    }

    // example wantNeighbors = true -> destination_part = [0 0 0 0 4 4 4 4 8 8 8 8]
    // example wantNeighbors = false -> destination_part = [0 1 2 3 0 1 2 3 0 1 2 3]
    if (wantNeighbors)
    {
        // After sorting, each run of equal values maps to the first rank index
        // of that run, grouping consecutive ranks onto the same destination.
        std::sort (round_robin.begin(), round_robin.end());
        m_destination_partitions[0] = 0;

        for (int p = 1; p < num_parts; p++)
        {
            m_destination_partitions[p] =
                (round_robin[p - 1] < round_robin[p]) ? p : m_destination_partitions[p - 1];
        }
    }

    // NOTE(review): when wantNeighbors is false, m_destination_partitions is
    // resized but not written here — presumably populated elsewhere; confirm
    // with callers before depending on the round-robin example above.
    m_my_destination_part = m_destination_partitions[global_id()];
}
template <class TConfig>
void DistributedManagerBase<TConfig>::computeDestinationPartitionsWithCons(int my_id, int num_parts, IVector_h &destination_part, DistributedComms<TConfig> *comms)
{
    // Map every rank onto the first rank that runs on the same physical GPU:
    // the identifier is hostname + PCI bus/device ids of this rank's device.
    int device_id = this->A->getResources()->getDevice(0);
    std::string base_hostname;
    comms->get_hostname(base_hostname);
    // Append PCI-E ID to string
    hipDeviceProp_t dev_prop;
    hipGetDeviceProperties(&dev_prop, device_id);
    std::stringstream id_stream;
    id_stream << base_hostname << "_" << dev_prop.pciBusID << "_" << dev_prop.pciDeviceID;
    std::string my_hostname(id_stream.str());

    // Collect all identifiers; the destination is the lowest rank sharing ours.
    std::vector<std::string> hostnames;
    comms->exchange_hostnames(my_hostname, hostnames, num_parts);
    std::vector<std::string>::iterator match = std::find( hostnames.begin(), hostnames.end(), my_hostname );
    int my_destination_part = match - hostnames.begin();

    // Publish every rank's choice into destination_part.
    comms->all_gather(my_destination_part, destination_part, num_parts);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVector(VVector_v &v_in, const void *data, int n, int block_dimy)
{
    // Undo the renumbering of v_in and copy the result into the caller's
    // host buffer. Consolidated/glued fine levels use the consolidation path.
    if (this->isFineLevelConsolidated() || this->isFineLevelGlued())
    {
        revertAndDownloadVectorWithCons(v_in, data, n, block_dimy);
        return;
    }

    // Validate the request before touching the device.
    if ( n == 0 )
    {
        FatalError("Cannot download if size = 0", AMGX_ERR_NOT_IMPLEMENTED);
    }

    if (data == NULL )
    {
        FatalError("Cannot download to a NULL pointer", AMGX_ERR_NOT_IMPLEMENTED);
    }

    if (v_in.size() == 0 )
    {
        FatalError("Cannot download an empty vector", AMGX_ERR_NOT_IMPLEMENTED);
    }

    // Un-permute into a scratch vector, then copy it back to the caller.
    VVector_v unpermuted;
    revertVector(v_in, unpermuted);
    hipMemcpy((value_type *)data, unpermuted.raw(), n * block_dimy * sizeof(value_type), hipMemcpyDefault);
    cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v_in, VVector_v &v_out)
{
// Scatter v_in's owned entries back to their original (pre-renumbering)
// positions into v_out, which is resized to owned size if needed.
// NOTE(review): '&&' binds tighter than '||', so this guard reads
// ((!glued && no neighbors) || no renumbering) -> return. Confirm that
// returning for glued managers with an empty renumbering is intended.
if (!this->isFineLevelGlued() && this->neighbors.size() == 0 || this->renumbering.size() == 0) { return;}
if (v_in.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }
if (v_in.size() < this->halo_offsets[0]*v_in.get_block_size())
{
FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED);
}
int size = this->halo_offsets[0];
if (v_out.size() != size * this->A->get_block_dimx())
{
v_out.resize(size * this->A->get_block_dimx());
}
//reorder based on row permutation
int num_blocks = min(4096, (size + 511) / 512);
hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, v_out.raw(), v_in.raw(), this->renumbering.raw(), v_in.get_block_size(), size);
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVectorWithCons(VVector_v &v_in, const void *data_pinned, int n, int block_dimy)
{
// Consolidated download: the root partition un-permutes the merged vector
// and scatters each child's slice back to it (via CUDA IPC, raw comms for
// aggregation, or send_vector for the glued/classical path); each rank then
// copies its own slice into the caller's host buffer.
if (v_in.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }
void *root_v_ptr = NULL;
int size = v_in.get_unconsolidated_size();
int num_rows = size / v_in.get_block_size();
if (this->m_is_fine_level_root_partition)
{
root_v_ptr = (void *) v_in.raw();
}
// temp receives this rank's un-permuted slice, whatever the transport.
VVector_v temp;
temp.set_block_dimx(v_in.get_block_dimx());
temp.set_block_dimy(v_in.get_block_dimy());
temp.resize(size);
bool useCudaIpc = this->m_use_cuda_ipc_consolidation;
if (useCudaIpc)
{
// Do IPC
// Each rank reads its slice straight out of the root's device buffer.
this->ipcExchangePtr(root_v_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
// Children partition waits for parent to be done updating vector
this->ipcWaitForRoot(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
cudaCheckError();
//reorder based on row permutation
int num_blocks = min(4096, (num_rows + 511) / 512);
hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, temp.raw(), (value_type *) root_v_ptr, this->m_row_ids_CONS.raw(), v_in.get_block_size(), num_rows);
cudaCheckError();
if (!this->m_is_fine_level_root_partition)
{
hipIpcCloseMemHandle(root_v_ptr);
}
}
else
{
if (this->m_is_fine_level_consolidated) // aggregation
{
if (this->m_is_fine_level_root_partition)
{
// Root: learn each child's row count, un-permute each child's slice
// out of the merged vector, and send it back.
IVector_h child_n(this->m_num_fine_level_parts_to_consolidate);
int max_n = 0;
for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = this->m_fine_level_parts_to_consolidate[i];
if (current_part != this->fine_level_id())
{
this->getFineLevelComms()->recv_vector(child_n, current_part, 30000 + current_part, i, 1);
}
else
{
child_n[i] = num_rows;
}
if (child_n[i] > max_n) { max_n = child_n[i]; }
}
// Resize temp vector
VVector_v child_temp;;
child_temp.resize(max_n * v_in.get_block_size());
for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = this->m_fine_level_parts_to_consolidate[i];
// Pack the vector to be sent
int num_blocks = min(4096, (child_n[i] + 511) / 512);
if (current_part != this->fine_level_id())
{
hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, child_temp.raw(), (value_type *) root_v_ptr, this->m_child_row_ids[i].raw(), v_in.get_block_size(), child_n[i]);
this->getFineLevelComms()->send_vector(child_temp, current_part, 30001 + current_part, 0, child_n[i]*v_in.get_block_size());
}
else
{
// Root's own slice goes directly into temp.
hipLaunchKernelGGL(( inverse_reorder_vector_values) , dim3(num_blocks), dim3(512), 0, 0, temp.raw(), (value_type *) root_v_ptr, this->m_row_ids_CONS.raw(), v_in.get_block_size(), child_n[i]);
}
}
cudaCheckError();
}
else
{
// Child: announce size, then wait for the root to send our slice back.
IVector_h size(1);
size[0] = num_rows;
this->getFineLevelComms()->send_vector(size, this->m_my_fine_level_destination_part, 30000 + this->fine_level_id());
this->getFineLevelComms()->recv_vector(temp, this->m_my_fine_level_destination_part, 30001 + this->fine_level_id());
}
}
else if (this->m_is_fine_level_glued) // classical
{
if (this->m_is_fine_level_root_partition)
{
// Root un-permutes the whole merged vector, then sends each child
// its contiguous slice (offsets from the consolidation array).
temp.resize(v_in.size());
revertVector(v_in, temp);
cudaCheckError();
for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = this->m_fine_level_parts_to_consolidate[i];
if (current_part != this->fine_level_id())
{
this->getFineLevelComms()->send_vector( temp,
current_part,
current_part + 30001,
this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()],
this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part] );
cudaCheckError();
}
}
}
else
{
this->getFineLevelComms()->recv_vector(temp, this->m_my_fine_level_destination_part, 30001 + this->fine_level_id());
cudaCheckError();
}
// Trim temp to exactly this rank's slice.
temp.resize(this->getConsolidationArrayOffsets()[this->fine_level_id() + 1] - this->getConsolidationArrayOffsets()[this->fine_level_id()]);
cudaCheckError();
}
}
// Copy on host
hipMemcpy((value_type *)data_pinned, temp.raw(), temp.size() * sizeof(value_type), hipMemcpyDefault);
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVector(VVector_v &v, const void *data, int n, int block_dim)
{
    // Host backend: distributed transforms are unsupported. A serial setup
    // (no neighbor partitions) is a harmless no-op; anything else is fatal.
    if (this->neighbors.size() == 0) { return; }

    FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformVector(VVector_v &v)
{
    // Host backend: vector renumbering is only implemented on devices.
    // With no neighbor partitions there is nothing to transform.
    if (this->neighbors.size() == 0) { return; }

    FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVectorWithCons(VVector_v &v, const void *data, int n, int block_dim)
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVector(VVector_v &v, const void *data, int n, int block_dim)
{
    // Host backend stub: unconditionally fails — implemented for device configs only.
    FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v_in, VVector_v &v_out)
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v)
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVectorWithCons(VVector_v &v_in, const void *data, int n, int block_dim)
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix()
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix_owned()
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::obtain_shift_l2g_reordering(index_type n, I64Vector_h &l2g, IVector_h &p, IVector_h &q)
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::unpack_partition(index_type *Bp, index_type *Bc, mat_value_type *Bv)
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::generatePoisson7pt(int nx, int ny, int nz, int P, int Q, int R)
{
    // Host backend stub: unconditionally fails — implemented for device configs only.
    FatalError("GeneratePoisson7pt only implemented on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrix(
    int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets,
    const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global, const void *diag, const MatrixDistribution &dist)
{
    // Host backend stub: unconditionally fails — distributed matrix upload is device-only.
    FatalError("loadDistributedMatrix only implemented on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingB2Lmaps()
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingHaloRows()
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsNoCons(int n, int nnz, const mat_value_type *data, const mat_value_type *diag)
{
    // Host backend stub: unconditionally fails — implemented for device configs only.
    FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsWithCons(int n, int nnz, const mat_value_type *data, const mat_value_type *diag)
{
    // Host backend stub: unconditionally fails — implemented for device configs only.
    FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createRenumbering(IVector &renumbering)
{
    // Host backend stub: the distributed path is only implemented for device configs.
    const bool is_distributed = this->neighbors.size() > 0;
    if (is_distributed)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template < class TConfig >
void DistributedManagerBase<TConfig>::remove_boundary(IVector_h &flagArray, IVector_h &B2L_map, int size)
{
    // Clear the flag of every row listed in the boundary-to-local map.
    for (int idx = 0; idx < size; ++idx)
    {
        const int row = B2L_map[idx];
        flagArray[row] = 0;
    }
}
template < class TConfig >
void DistributedManagerBase<TConfig>::get_unassigned(IVector_h &flagArray, IVector_h &B2L_map, IVector_h &partition_flags, int size, int fa_size/*, int rank*/)
{
    // Walk the B2L map; the first entry to reach an unflagged row claims it
    // (flagArray) and records itself as newly assigned (partition_flags).
    for (int idx = 0; idx < size; ++idx)
    {
        const int row = B2L_map[idx];
        if (row >= fa_size)
        {
            continue; // entries outside the flag array are ignored
        }
        if (flagArray[row] == 0)
        {
            flagArray[row] = 1;
            partition_flags[idx] = 1;
        }
    }
}
template < class TConfig >
void DistributedManagerBase<TConfig>::set_unassigned(IVector_h &partition_flags, IVector_h &partition_renum, IVector_h &B2L_map, IVector_h &renumbering, int size, int max_element, int renum_size/*, int rank*/)
{
    // Give newly assigned rows their final number (offset by max_element), then
    // rewrite the B2L map itself through the renumbering table.
    for (int idx = 0; idx < size; ++idx)
    {
        const int row = B2L_map[idx];
        if (row >= renum_size)
        {
            continue; // out-of-range entries are left untouched
        }
        if (partition_flags[idx] == 1)
        {
            renumbering[row] = max_element + partition_renum[idx];
        }
        B2L_map[idx] = renumbering[row];
    }
}
/* print manager for target rank to a file or stdout */
template<class TConfig>
void DistributedManagerBase<TConfig>::print(char *f, char *s, int trank)
{
    // Dumps all communication, matrix-size and renumbering metadata of this
    // manager, but only on the rank matching 'trank'. Output goes to stdout when
    // f == NULL, otherwise to "<f>_r<rank>_l<level>.mtx". 's' is a caption
    // printed on the first line. Intended for debugging/diffing managers.
    DistributedManagerBase<TConfig> *m = this;
    int rank = 0;
    int level = 0;
    char filename[1024];
    FILE *fid = NULL;
    int i, j, k, t1, t2;
#ifdef AMGX_WITH_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
    //check target rank
    if (rank == trank)
    {
        //check whether to output to stdout or a file
        if (f == NULL)
        {
            fid = stdout;
        }
        else
        {
            level = m->A->amg_level_index;
#ifdef _WIN32
            _snprintf_s(filename, 1024, 1024, "%s_r%d_l%d.mtx", f, rank, level);
#else
            snprintf(filename, 1024, "%s_r%d_l%d.mtx", f, rank, level);
#endif
            // NOTE(review): fopen failure is not checked; fid may be NULL here.
            fid = fopen(filename, "w");
        }
        // Make sure pending device work is finished before reading device-backed fields.
        hipDeviceSynchronize();
        cudaCheckError();
        fprintf(fid, "%s\n", s);
        //--- communication info ---
        //compare neighbors
        t1 = m->neighbors.size();
        fprintf(fid, "neighbors %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->neighbors[i];
            fprintf(fid, "%d\n", k);
        }
        //compare B2L_rings
        t1 = B2L_rings.size();
        fprintf(fid, "B2L_rings %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            t2 = m->B2L_rings[i].size();
            fprintf(fid, "B2L_rings-%d [%d]\n", i, t2);
            for (j = 0; j < t2; j++)
            {
                k = m->B2L_rings[i][j];
                fprintf(fid, "%d\n", k);
            }
        }
        //compare B2L_maps
        t1 = B2L_maps.size();
        fprintf(fid, "B2L_maps %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            t2 = m->B2L_maps[i].size();
            fprintf(fid, "B2L_maps-%d [%d]\n", i, t2);
            for (j = 0; j < t2; j++)
            {
                k = m->B2L_maps[i][j];
                fprintf(fid, "%d\n", k);
            }
        }
        //compare L2H_maps
        t1 = L2H_maps.size();
        fprintf(fid, "L2H_maps %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            t2 = m->L2H_maps[i].size();
            fprintf(fid, "L2H_maps-%d [%d]\n", i, t2);
            for (j = 0; j < t2; j++)
            {
                k = m->L2H_maps[i][j];
                fprintf(fid, "%d\n", k);
            }
        }
        //--- matrix info ---
        fprintf(fid, "num_rows_global=%ld\n", num_rows_global);
        fprintf(fid, "_num_rows_interior=%d\n", m->_num_rows_interior);
        fprintf(fid, "_num_rows_owned=%d\n", m->_num_rows_owned);
        fprintf(fid, "_num_rows_full=%d\n", m->_num_rows_full);
        fprintf(fid, "_num_rows_all=%d\n", m->_num_rows_all);
        fprintf(fid, "_num_nz_interior=%d\n", m->_num_nz_interior);
        fprintf(fid, "_num_nz_owned=%d\n", m->_num_nz_owned);
        fprintf(fid, "_num_nz_full=%d\n", m->_num_nz_full);
        fprintf(fid, "_num_nz_all=%d\n", m->_num_nz_all);
        //compare # halo rows and halo offsets
        fprintf(fid, "# halo rings %d and rows %d\n", m->num_halo_rings(), m->num_halo_rows());
        t1 = m->halo_offsets.size();
        fprintf(fid, "halo_offsets %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->halo_offsets[i];
            fprintf(fid, "%d\n", k);
        }
        //compare halo ranges
        t1 = m->halo_ranges.size();
        fprintf(fid, "halo_ranges %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->halo_ranges[i];
            fprintf(fid, "%d\n", k);
        }
        //compare halo ranges (host)
        t1 = m->halo_ranges_h.size();
        fprintf(fid, "halo_ranges_h %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->halo_ranges_h[i];
            fprintf(fid, "%d\n", k);
        }
        //compare part offsets
        t1 = m->part_offsets.size();
        fprintf(fid, "part_offsets %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->part_offsets[i];
            fprintf(fid, "%d\n", k);
        }
        //compare part offsets (host)
        t1 = m->part_offsets_h.size();
        fprintf(fid, "part_offsets_h %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->part_offsets_h[i];
            fprintf(fid, "%d\n", k);
        }
        //compare interior row list
        t1 = m->interior_rows_list.size();
        fprintf(fid, "interior_rows_list %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->interior_rows_list[i];
            fprintf(fid, "%d\n", k);
        }
        //compare boundary row list
        t1 = m->boundary_rows_list.size();
        fprintf(fid, "boundary_rows_list %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->boundary_rows_list[i];
            fprintf(fid, "%d\n", k);
        }
        //compare halo1 row list
        t1 = m->halo1_rows_list.size();
        fprintf(fid, "halo1_rows_list %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->halo1_rows_list[i];
            fprintf(fid, "%d\n", k);
        }
        fprintf(fid, "pointers halo_rows=%p and halo_btl=%p\n", m->halo_rows, m->halo_btl);
        //--- packing info ---
        //compare local to global map
        t1 = m->local_to_global_map.size();
        fprintf(fid, "local_to_global_map %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->local_to_global_map[i];
            fprintf(fid, "%d\n", k);
        }
        //compare renumbering
        t1 = m->renumbering.size();
        fprintf(fid, "renumbering %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->renumbering[i];
            fprintf(fid, "%d\n", k);
        }
        //compare inverse renumbering
        t1 = m->inverse_renumbering.size();
        fprintf(fid, "inverse_renumbering %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->inverse_renumbering[i];
            fprintf(fid, "%d\n", k);
        }
        //--- GPU related and miscellaneous info
        //streams
        fprintf(fid, "streams i=%p, b=%p\n", m->get_int_stream(), m->get_bdy_stream());
        //miscellaneous info
        int64_t bi = m->base_index(); //inlined function
        int np = m->get_num_partitions(); //inlined function
        int rp = (int)m->isRootPartition(); //cast from boolean to int
        fprintf(fid, "gid=%d,bi=%ld,np=%d,rp=%d,ir=%d,in=%d,bn=%d\n", m->global_id(), bi, np, rp, m->index_range(), m->num_interior_nodes(), m->num_boundary_nodes());
        hipDeviceSynchronize();
        hipGetLastError();
        if (fid != stdout)
        {
            fclose(fid);
        }
    }
}
/* print manager for target rank to a file or stdout (for all ranks) */
template<class TConfig>
void DistributedManagerBase<TConfig>::printToFile(char *f, char *s)
{
    // Each process calls print() with its own rank as the target, so every rank
    // produces output (to a per-rank file when f != NULL, otherwise to stdout).
    int rank = 0;
#ifdef AMGX_WITH_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
    this->print(f, s, rank);
}
/* compare two managers */
template<class TConfig>
int DistributedManagerBase<TConfig>::compare(DistributedManagerBase<TConfig> *m2)
{
    // Field-by-field comparison of communication metadata.
    // Returns 0 on equality, otherwise a code (1-8) identifying the first mismatch.
    DistributedManagerBase<TConfig> *m1 = this;
    int n1, n2;
    // neighbors: size, then element-wise
    n1 = m1->neighbors.size();
    n2 = m2->neighbors.size();
    if (n1 != n2)
    {
        return 1;
    }
    for (int idx = 0; idx < n1; ++idx)
    {
        if (m1->neighbors[idx] != m2->neighbors[idx])
        {
            return 2;
        }
    }
    // B2L_rings: one ring vector per neighbor
    for (int idx = 0; idx < (m1->neighbors.size()); ++idx)
    {
        int r1 = m1->B2L_rings[idx].size();
        int r2 = m2->B2L_rings[idx].size();
        if (r1 != r2)
        {
            return 3;
        }
        for (int j = 0; j < r1; ++j)
        {
            if (m1->B2L_rings[idx][j] != m2->B2L_rings[idx][j])
            {
                return 4;
            }
        }
    }
    // B2L_maps: size, then whole-vector comparison
    n1 = m1->B2L_maps.size();
    n2 = m2->B2L_maps.size();
    if (n1 != n2)
    {
        return 5;
    }
    for (int idx = 0; idx < n1; ++idx)
    {
        if (m1->B2L_maps[idx] != m2->B2L_maps[idx])
        {
            return 6;
        }
    }
    // L2H_maps: size, then whole-vector comparison
    n1 = m1->L2H_maps.size();
    n2 = m2->L2H_maps.size();
    if (n1 != n2)
    {
        return 7;
    }
    for (int idx = 0; idx < n1; ++idx)
    {
        if (m1->L2H_maps[idx] != m2->L2H_maps[idx])
        {
            return 8;
        }
    }
    return 0;
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
DistributedManager< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::~DistributedManager< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >()
{
    // Nothing to release explicitly; members clean up via their own destructors.
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
DistributedManager< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::~DistributedManager< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >()
{
    // Nothing to release explicitly; members clean up via their own destructors.
}
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateB2Lmaps(IVector_h_vector &dest_coarse_B2L_maps, IVector_h_vector &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors)
{
    // Thin dispatch to the shared consB2Lmaps implementation (host-vector overload).
    consB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors);
}
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateB2Lmaps(IVector_d_vector &dest_coarse_B2L_maps, IVector_d_vector &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors)
{
    // Thin dispatch to the shared consB2Lmaps implementation (device-vector overload).
    consB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors);
}
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateB2LmapsOnRoot(int &num_consolidated_neighbors, IVector_h_vector &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, IVector_h_vector &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms)
{
    // Thin dispatch to the shared consB2LmapsOnRoot implementation (host-vector overload).
    consB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_ids, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, comms);
}
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateB2LmapsOnRoot(int &num_consolidated_neighbors, IVector_d_vector &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, IVector_d_vector &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms)
{
    // Thin dispatch to the shared consB2LmapsOnRoot implementation (device-vector overload).
    consB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_ids, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, comms);
}
/****************************************
 * Explicit instantiations
 ***************************************/
// Instantiate DistributedManager for every supported build mode.
#define AMGX_CASE_LINE(CASE) template class DistributedManager<TemplateMode<CASE>::Type >;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate loadDistributedMatrix for both 32-bit and 64-bit global column indices.
#define AMGX_CASE_LINE(CASE) template void DistributedManager<TemplateMode<CASE>::Type>::loadDistributedMatrix( \
    int, int, const int, const int, const int*, const int *col_indices, const mat_value_type*, int, int, const void*, const MatrixDistribution &dist); \
    template void DistributedManager<TemplateMode<CASE>::Type>::loadDistributedMatrix( \
    int, int, const int, const int, const int*, const int64_t *col_indices, const mat_value_type*, int, int, const void*, const MatrixDistribution &dist);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the shared base class as well.
#define AMGX_CASE_LINE(CASE) template class DistributedManagerBase<TemplateMode<CASE>::Type >;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
| c9d4debb6f4203f5737a75dd2acc79b10ef80e51.cu | /* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <distributed/distributed_manager.h>
#include <distributed/comms_mpi_gpudirect.h>
#include <distributed/comms_mpi_hostbuffer_stream.h>
#include <distributed/comms_visitors.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/remove.h>
#include <thrust/unique.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust_wrapper.h>
#include <basic_types.h>
#include <error.h>
#include <util.h>
#include <types.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include <assert.h>
#include "cuda_runtime.h"
#include "reorder_partition.h"
#include "amgx_types/util.h"
#include <algorithm>
#include <iostream> //debug only:
// Predicate functor: true when a partition id equals the one this object was built with.
struct is_my_part : public thrust::unary_function<int, bool>
{
    const int _my_part; // partition id to match against
    is_my_part(int my_part) : _my_part(my_part) { }
    __host__ __device__
    bool operator()(const int part)
    {
        return (part == _my_part);
    }
};
using namespace std;
namespace amgx
{
static int insertDiagonals = 1;
// Flattened index of interior node (i,j,k) in a local nx-by-ny-by-nz box
// (x varies fastest, then y, then z).
template <typename index_type>
static __device__ __forceinline__
index_type internal_index(index_type i, index_type j, index_type k, index_type nx, index_type ny, index_type nz)
{
    return k * (nx * ny) + j * nx + i;
}
// Global row offset of rank (p,q,r) in a P-by-Q-by-R processor grid, assuming
// every rank owns exactly num_rows rows (p varies fastest in rank numbering).
template <typename index_type>
static __device__ __forceinline__
int64_t get_global_offset(index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows)
{
    int rank_id = r * (P * Q) + q * P + p;
    return ((int64_t) rank_id) * ((int64_t) num_rows);
}
template <typename index_type>
__global__
void poisson7pt_count_row_len(index_type *row_len, index_type nx, index_type ny, index_type nz, index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows)
{
    // One entry per local row: 7 minus one for each stencil arm that would fall
    // outside the *global* domain (i.e. local box face that is also a global face).
    for (int tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < num_rows; tidx += blockDim.x * gridDim.x)
    {
        // Recover (i,j,k) inside the local nx*ny*nz box from the flat row id.
        const int i = tidx % nx;
        const int j = (( tidx - i) / nx) % ny;
        const int k = ( tidx - i - nx * j) / ( nx * ny );
        int missing = 0;
        if (i == 0      && p == 0)     { missing++; }
        if (i == nx - 1 && p == P - 1) { missing++; }
        if (j == 0      && q == 0)     { missing++; }
        if (j == ny - 1 && q == Q - 1) { missing++; }
        if (k == 0      && r == 0)     { missing++; }
        if (k == nz - 1 && r == R - 1) { missing++; }
        row_len[tidx] = 7 - missing;
    }
}
// Fills one CSR row per grid-stride iteration for a 7-point Poisson operator on
// the nx*ny*nz sub-box owned by rank (p,q,r) of a P*Q*R partitioning. Interior
// neighbours get local column indices; off-rank neighbours get halo indices
// (>= num_rows) and have their global id recorded in local_to_global. The halo
// numbering assumes face ordering -x, +x, -y, +y, -z, +z, tracked via halo_offset.
template <typename index_type, typename mat_value_type>
__global__
void poisson7pt_set_col_values(const index_type *__restrict__ row_offsets, index_type *__restrict__ col_indices, mat_value_type *__restrict__ values, index_type *__restrict__ diag, int64_t *__restrict__ local_to_global, index_type nx, index_type ny, index_type nz, index_type p, index_type q, index_type r, index_type P, index_type Q, index_type R, index_type num_rows)
{
    for (int row = threadIdx.x + blockIdx.x * blockDim.x; row < num_rows ; row += blockDim.x * gridDim.x)
    {
        /* compute p,q,r from P,Q,R and myid */
        int i = row % nx; // Position in x direction
        int j = (( row - i) / nx) % ny; // Position in y
        int k = ( row - i - nx * j) / ( nx * ny ); // Position in z
        int halo_offset = num_rows;
        int pos = row_offsets[row];
        // Diagonal element (always first in the row; stencil centre weight is 6)
        diag[row] = pos;
        col_indices[pos] = row;
        values[pos++] = types::util<mat_value_type>::get_one() * 6.;
        // ----------------------------
        // Neighbor at position i-1
        // ----------------------------
        if (i)
        {
            // Has a i-1 neighbor, which is an internal node at position (i-1,j,k)
            col_indices[pos] = internal_index(i - 1, j, k, nx, ny, nz);
            values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
        }
        else if (p)
        {
            // Has a i-1 neighbor, which is a halo node
            int halo_index = halo_offset + k * ny + j;
            col_indices[pos] = halo_index;
            values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
            int64_t global_offset = get_global_offset(p - 1, q, r, P, Q, R, num_rows);
            local_to_global[halo_index - num_rows] = global_offset + internal_index(nx - 1, j, k, nx, ny, nz);
        }
        if (p)
        {
            halo_offset += ny * nz; // skip past the -x face's halo block
        }
        // ----------------------------
        // Neighbor at position i+1
        // ----------------------------
        if (i < nx - 1)
        {
            // Has i+1 neighbor, which is an internal node at position (i+1,j,k)
            col_indices[pos] = internal_index(i + 1, j, k, nx, ny, nz);
            values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
        }
        else
        {
            if (p < P - 1)
            {
                // Has i+1 neighbor, which is a halo node
                int halo_index = halo_offset + k * ny + j;
                col_indices[pos] = halo_index;
                values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
                int64_t global_offset = get_global_offset(p + 1, q, r, P, Q, R, num_rows);
                local_to_global[halo_index - num_rows] = global_offset + internal_index(0, j, k, nx, ny, nz);
            }
        }
        if (p < P - 1)
        {
            halo_offset += ny * nz; // skip past the +x face's halo block
        }
        // ----------------------------
        // Neighbor at position j-1
        // ----------------------------
        if (j)
        {
            // Has a j-1 neighbor, which is an internal node at position (i,j-1,k)
            col_indices[pos] = internal_index(i, j - 1, k, nx, ny, nz);
            values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
        }
        else if (q)
        {
            // Has a j-1 neighbor, which is a halo node
            int halo_index = halo_offset + k * nx + i;
            col_indices[pos] = halo_index;
            values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
            int64_t global_offset = get_global_offset(p, q - 1, r, P, Q, R, num_rows);
            local_to_global[halo_index - num_rows] = global_offset + internal_index(i, ny - 1, k, nx, ny, nz);
        }
        if (q)
        {
            halo_offset += nx * nz; // skip past the -y face's halo block
        }
        // ----------------------------
        // Neighbor at position j+1
        // ----------------------------
        if (j < ny - 1)
        {
            // Has a j+1 neighbor, which is an internal node at position (i,j+1,k)
            col_indices[pos] = internal_index(i, j + 1, k, nx, ny, nz);
            values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
        }
        else
        {
            if (q < Q - 1)
            {
                // Has a j+1 neighbor, which is a halo node
                int halo_index = halo_offset + k * nx + i;
                col_indices[pos] = halo_index;
                values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
                int64_t global_offset = get_global_offset(p, q + 1, r, P, Q, R, num_rows);
                local_to_global[halo_index - num_rows] = global_offset + internal_index(i, 0, k, nx, ny, nz);
            }
        }
        if (q < Q - 1)
        {
            halo_offset += nx * nz; // skip past the +y face's halo block
        }
        // ----------------------------
        // Neighbor at position k-1
        // ----------------------------
        if (k)
        {
            // Has a k-1 neighbor, which is an internal node at position (i,j,k-1)
            col_indices[pos] = internal_index(i, j, k - 1, nx, ny, nz);
            values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
        }
        else if (r)
        {
            // Has a k-1 neighbor, which is a halo node
            int halo_index = halo_offset + j * nx + i;
            col_indices[pos] = halo_index;
            values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
            int64_t global_offset = get_global_offset(p, q, r - 1, P, Q, R, num_rows);
            local_to_global[halo_index - num_rows] = global_offset + internal_index(i, j, nz - 1, nx, ny, nz);
        }
        if (r)
        {
            halo_offset += nx * ny; // skip past the -z face's halo block
        }
        // ----------------------------
        // Neighbor at position k+1
        // ----------------------------
        if (k < nz - 1)
        {
            // Has a k+1 neighbor, which is an internal node at position (i,j,k+1)
            col_indices[pos] = internal_index(i, j, k + 1, nx, ny, nz);
            values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
        }
        else
        {
            if (r < R - 1)
            {
                // Has a k+1 neighbor, which is a halo node
                int halo_index = halo_offset + j * nx + i;
                col_indices[pos] = halo_index;
                values[pos++] = types::util<mat_value_type>::invert(types::util<mat_value_type>::get_one());
                int64_t global_offset = get_global_offset(p, q, r + 1, P, Q, R, num_rows);
                local_to_global[halo_index - num_rows] = global_offset + internal_index(i, j, 0, nx, ny, nz);
            }
        }
        if (r < R - 1)
        {
            halo_offset += nx * ny; // skip past the +z face's halo block
        }
    }
}
template <typename mat_value_type>
__global__
void set_halo_cols_values(int *row_offsets, int *col_indices, mat_value_type *values, int n, int total_rows, int bsize)
{
    // For every halo row (rows n..total_rows-1), write a single diagonal entry
    // with a placeholder value of one at the row's first slot.
    const int stride = gridDim.x * blockDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < total_rows - n; tid += stride)
    {
        const int offset = row_offsets[n + tid];
        col_indices[offset] = n + tid;
#pragma unroll
        for (int i = 0; i < bsize; i++)
        {
            values[offset * bsize + i] = types::util<mat_value_type>::get_one(); // arbitrary placeholder value
        }
    }
}
template <typename mat_value_type>
__global__
void zero_copy_row_lengths_ids_offsets(int *d_old_row_offsets, int *root_row_offsets, int *d_row_ids, int n, int total_num_halos, mat_value_type *diag)
{
    // Scatter each owned row's length into the consolidated offsets array at the
    // row's new position (d_row_ids). When an external diagonal will be inserted
    // (diag != NULL), each row grows by one entry. Threads covering halo rows
    // (tid >= n) do nothing here.
    const int stride = gridDim.x * blockDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < n + total_num_halos; tid += stride)
    {
        if (tid < n)
        {
            const int start = d_old_row_offsets[tid];
            int row_length = d_old_row_offsets[tid + 1] - start; // zero-copy
            if (diag != NULL)
            {
                row_length++; // room for the inserted diagonal
            }
            root_row_offsets[d_row_ids[tid]] = row_length;
        }
    }
}
// Copy a partition's CSR matrix into its consolidated (root) location: rows are
// scattered through row_ids, column indices are remapped through the same table,
// and (when h_old_diag != NULL) the external diagonal is inserted as the first
// entry of each row. Values are blocks of 'bsize' scalars per nonzero.
template< typename mat_value_type>
__global__
void ipc_consolidation_upload_matrix(int num_rows, int *row_ids, const int *old_row_offsets, int *new_row_offsets, const int *h_old_col_indices, int *new_col_indices, const mat_value_type *h_old_values, mat_value_type *new_values, const mat_value_type *h_old_diag, int bsize)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    while (row < num_rows)
    {
        int new_row = row_ids[row];
        int src_base = old_row_offsets[row];
        int dst_base = new_row_offsets[new_row];
        // Insert the diagonal at the beginning of each row
        if (h_old_diag != NULL)
        {
            new_col_indices[dst_base] = new_row;
#pragma unroll
            for (int j = 0; j < bsize; j++)
            {
                new_values[dst_base * bsize + j] = h_old_diag[row * bsize + j];
            }
            // Increment dst_base by one so off-diagonals land after the diagonal
            dst_base++;
        }
        int end = old_row_offsets[row + 1] - src_base;
        for (int i = 0; i < end; i++)
        {
            int old_col = h_old_col_indices[src_base + i];
            int new_col = row_ids[old_col]; // remap column through the same id table
            new_col_indices[dst_base + i] = new_col;
#pragma unroll
            for (int j = 0; j < bsize; j++)
            {
                new_values[ (dst_base + i)*bsize + j ] = h_old_values[ (src_base + i) * bsize + j ];
            }
        }
        row += gridDim.x * blockDim.x;
    }
}
// Same traversal as ipc_consolidation_upload_matrix, but only overwrites the
// numeric values (and the inserted diagonal, when present); the consolidated
// sparsity pattern written earlier is reused unchanged.
template< typename mat_value_type>
__global__
void ipc_consolidation_replace_values(int num_rows, int *row_ids, const int *old_row_offsets, int *new_row_offsets, const mat_value_type *h_old_values, mat_value_type *new_values, const mat_value_type *h_old_diag, int bsize)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    while (row < num_rows)
    {
        int new_row = row_ids[row];
        int src_base = old_row_offsets[row];
        int dst_base = new_row_offsets[new_row];
        // Insert the diagonal at the beginning of each row
        if (h_old_diag != NULL)
        {
#pragma unroll
            for (int j = 0; j < bsize; j++)
            {
                new_values[dst_base * bsize + j] = h_old_diag[row * bsize + j];
            }
            // Increment dst_base by one so off-diagonals land after the diagonal
            dst_base++;
        }
        int end = old_row_offsets[row + 1] - src_base;
        for (int i = 0; i < end; i++)
        {
#pragma unroll
            for (int j = 0; j < bsize; j++)
            {
                new_values[ (dst_base + i)*bsize + j ] = h_old_values[ (src_base + i) * bsize + j ];
            }
        }
        row += gridDim.x * blockDim.x;
    }
}
__global__ void flag_halo_ids_kernel(INDEX_TYPE *flags, INDEX_TYPE *ids, INDEX_TYPE offset, INDEX_TYPE size, INDEX_TYPE upper)
{
    // Mark flags[ids[i] - offset] = 1 for every listed id (grid-stride loop).
    // 'upper' is part of the interface but not used in this kernel.
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride)
    {
        flags[ids[i] - offset] = 1;
    }
}
__global__ void read_halo_ids_kernel(INDEX_TYPE *flags, INDEX_TYPE *ids, INDEX_TYPE offset, INDEX_TYPE size)
{
    // Replace each id with its flag-table entry: ids[i] = flags[ids[i] - offset].
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride)
    {
        ids[i] = flags[ids[i] - offset];
    }
}
// Scatter dst[map[row]] = src[row] for block vectors: each group of 'blocksize'
// consecutive threads moves one row's block of scalar values.
template<class T>
__global__ void reorder_vector_values(T *dst, const T *src, const INDEX_TYPE *map, INDEX_TYPE blocksize, INDEX_TYPE num_rows)
{
    int row = blockIdx.x * (blockDim.x / blocksize) + threadIdx.x / blocksize; //vectorised by block size
    int vec_id = threadIdx.x % blocksize; // position within the row's block
    // Trailing threads that do not form a complete group of 'blocksize' sit out.
    if (threadIdx.x >= (blockDim.x / blocksize)*blocksize ) { return; }
    while (row < num_rows)
    {
        dst[map[row]*blocksize + vec_id] = src[row * blocksize + vec_id];
        row += gridDim.x * (blockDim.x / blocksize);
    }
}
// Gather dst[row] = src[map[row]] for block vectors (inverse of
// reorder_vector_values): each group of 'blocksize' consecutive threads moves
// one row's block of scalar values.
template<class T>
__global__ void inverse_reorder_vector_values(T *dst, T *src, INDEX_TYPE *map, INDEX_TYPE blocksize, INDEX_TYPE num_rows)
{
    int row = blockIdx.x * (blockDim.x / blocksize) + threadIdx.x / blocksize;
    int vec_id = threadIdx.x % blocksize; // position within the row's block
    // Trailing threads that do not form a complete group of 'blocksize' sit out.
    if (threadIdx.x >= (blockDim.x / blocksize)*blocksize ) { return; }
    while (row < num_rows)
    {
        dst[row * blocksize + vec_id] = src[map[row] * blocksize + vec_id];
        row += gridDim.x * (blockDim.x / blocksize);
    }
}
__global__ void remove_boundary_kernel(INDEX_TYPE *flags, INDEX_TYPE *maps, INDEX_TYPE size)
{
    // Zero the flag of every row listed in 'maps' (grid-stride loop). Duplicate
    // entries all store the same value, so concurrent writes are benign.
    const int stride = blockDim.x * gridDim.x;
    for (int e = blockIdx.x * blockDim.x + threadIdx.x; e < size; e += stride)
    {
        flags[maps[e]] = 0;
    }
}
// For each B2L map entry, flag nodes that have not yet been claimed by any
// partition boundary: the first claim sets unassigned_flags[node] = 1 and
// records a 1 in 'output' at the map position. Entries whose target lies
// outside the flag array (>= uf_size) are skipped.
// NOTE(review): the read-then-write on unassigned_flags is not atomic; this
// appears to rely on distinct map entries referencing distinct nodes within
// one launch -- confirm with callers.
__global__ void get_unassigned_kernel(INDEX_TYPE *unassigned_flags, INDEX_TYPE *map, INDEX_TYPE *output, INDEX_TYPE part_size, INDEX_TYPE uf_size )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
while (idx < part_size)
{
if (map[idx] < uf_size)
{
if (unassigned_flags[map[idx]] == 0)
{
unassigned_flags[map[idx]] = 1;
output[idx] = 1;
}
}
idx += blockDim.x * gridDim.x;
}
}
// Assign new numbers to the boundary nodes flagged by get_unassigned_kernel:
// renum[node] = max_element + local rank within this boundary (part_num), then
// rewrite the B2L map entry itself through the (now updated) renumbering.
// Entries whose node lies outside the renumbering table (>= renum_size) are
// left untouched.
__global__ void set_unassigned_kernel(INDEX_TYPE *part_assigned_flags, INDEX_TYPE *part_num, INDEX_TYPE *map, INDEX_TYPE *renum, INDEX_TYPE part_size, INDEX_TYPE max_element, INDEX_TYPE renum_size /*, INDEX_TYPE rank*/)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
while (idx < part_size)
{
if (map[idx] < renum_size)
{
// Only nodes flagged as newly claimed get a fresh number.
if (part_assigned_flags[idx] == 1)
{
renum[map[idx]] = max_element + part_num[idx];
}
//also update the B2L map
map[idx] = renum[map[idx]];
}
idx += blockDim.x * gridDim.x;
}
}
// Renumber every entry of a B2L map through the 'renum' permutation; entries
// outside the renumbering table (>= renum_size) are left untouched.
//
// Bug fix: the grid-stride increment used to sit INSIDE the
// 'map[idx] < renum_size' branch, so any thread that hit an out-of-range
// entry never advanced 'idx' and spun forever. The increment now executes on
// every iteration, matching the sibling grid-stride kernels in this file.
__global__ void renumber_b2l_maps(INDEX_TYPE *map, INDEX_TYPE *renum, INDEX_TYPE part_size, INDEX_TYPE renum_size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
while (idx < part_size)
{
if (map[idx] < renum_size)
{
//update the B2L map
map[idx] = renum[map[idx]];
}
idx += blockDim.x * gridDim.x;
}
}
// Build the inverse permutation: irenum[renum[i]] = i for all i in
// [0, max_element). An out-of-range renum entry indicates a corrupted
// renumbering; it is reported via device printf.
//
// Fix: previously the out-of-bounds write irenum[renum[idx]] was still
// performed after the error was detected, corrupting unrelated memory.
// The write is now skipped for invalid entries.
__global__ void calc_inverse_renumbering(INDEX_TYPE *renum, INDEX_TYPE *irenum, INDEX_TYPE max_element)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
while (idx < max_element)
{
INDEX_TYPE r = renum[idx];
if (r < 0 || r >= max_element)
{
printf("Renumbering error: %d %d\n", r, max_element);
}
else
{
irenum[r] = idx;
}
idx += blockDim.x * gridDim.x;
}
}
// For each halo node in 'node_list', record its local index
// (map_offset + position in the list) at the mapping slot derived from its
// global id (node id minus base_index).
__global__ void create_halo_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, int64_t base_index, INDEX_TYPE map_offset, INDEX_TYPE size)
{
    const int stride = blockDim.x * gridDim.x;
    for (int r = blockIdx.x * blockDim.x + threadIdx.x; r < size; r += stride)
    {
        const int slot = node_list[r] - base_index;
        mapping[slot] = map_offset + r;
    }
}
// Like create_halo_mapping, but the value stored is taken from the B2L map:
// mapping[node_list[r] - base_index] = b2l_map[r].
__global__ void apply_h2l2b_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, int64_t base_index, INDEX_TYPE *b2l_map, INDEX_TYPE size)
{
    const int stride = blockDim.x * gridDim.x;
    for (int r = blockIdx.x * blockDim.x + threadIdx.x; r < size; r += stride)
    {
        const int slot = node_list[r] - base_index;
        mapping[slot] = b2l_map[r];
    }
}
// Translate column indices through 'mapping' and count how many survive per
// row. Columns with no mapping (mapping[col] < 0) are overwritten with -1 and
// excluded from the count; 'insert_diagonal' adds one slot per row for a
// diagonal entry to be inserted later. 'coop' threads cooperate on each row;
// shared memory must be sized to one int per thread.
// NOTE(review): the reduction loop starts at s = 2, i.e. it sums exactly 4
// lanes -- this appears to assume coop == 4; confirm at the call sites.
// NOTE(review): __syncthreads() sits inside the row loop; threads whose 'row'
// exceeds num_rows leave the loop early, so this appears to rely on all
// threads of a block making the same number of iterations -- verify.
template <int coop>
__global__ void map_col_indices_and_count_rowlen(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *row_length,
INDEX_TYPE *mapping, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal)
{
extern __shared__ volatile int reduction[];
int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
int coopIdx = threadIdx.x % coop;
while (row < num_rows)
{
int valid = 0;
for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += coop) //this may look horrible, but I expect low branch divergence, because col indices in a row usually belong to the same partition (or at most one more)
{
int colIdx = col_indices[idx];
int new_col_idx = mapping[colIdx];
if (new_col_idx >= 0)
{
// Column has a local index: rewrite it and count it.
valid++;
col_indices[idx] = new_col_idx;
}
else
{
// No local mapping: mark the column as invalid.
col_indices[idx] = -1;
}
}
// Per-row reduction of the per-lane counts in shared memory.
reduction[threadIdx.x] = valid;
for (int s = 2; s > 0; s >>= 1)
{
if (coopIdx < s)
{
reduction[threadIdx.x] += reduction[threadIdx.x + s];
}
__syncthreads();
}
if (coopIdx == 0)
{
row_length[row] = reduction[threadIdx.x] + insert_diagonal;
}
row += gridDim.x * blockDim.x / coop;
}
}
// Renumber the owned coarse column indices of P through 'renum'.
// Columns >= num_owned_coarse_pts (halo columns) keep their old numbering.
// The loop runs over the owned nonzeros (the caller passes the nnz count as
// num_owned_fine_pts).
__global__ void renumber_P_col_indices(INDEX_TYPE *__restrict__ col_indices, const INDEX_TYPE *__restrict__ renum, INDEX_TYPE num_owned_coarse_pts, INDEX_TYPE num_owned_fine_pts)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < num_owned_fine_pts; i += stride)
    {
        const INDEX_TYPE col = col_indices[i];
        if (col < num_owned_coarse_pts)
        {
            col_indices[i] = renum[col];
        }
    }
}
// Copy the CSR arrays of R into a reordered layout: owned rows land at the
// position given by 'renumbering' (via the new 'rows' offsets), while rows
// beyond num_owned_rows (halo rows) keep their original offsets. 'coop'
// threads cooperate on each row; values are copied per scalar within bsize
// blocks, columns per entry. Column indices themselves are copied unchanged.
template <int coop, class T>
__global__ void reorder_R_matrix(const INDEX_TYPE *old_rows, const INDEX_TYPE *old_cols, const T *old_vals, const INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE num_owned_rows)
{
int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
int coopIdx = threadIdx.x % coop;
while (row < num_rows)
{
INDEX_TYPE src_base = old_rows[row];
// Owned rows are permuted; halo rows stay in place.
INDEX_TYPE dst_base = row < num_owned_rows ? rows[renumbering[row]] : src_base;
// Copy the row's values (bsize scalars per nonzero), strided by coop.
for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop)
{
vals[dst_base * bsize + i] = old_vals[src_base * bsize + i];
}
// Copy the row's column indices.
for (int i = coopIdx; i < old_rows[row + 1] - src_base; i += coop)
{
cols[dst_base + i] = old_cols[src_base + i];
}
row += blockDim.x * gridDim.x / coop;
}
}
// Copy a whole matrix into a reordered layout: each row lands at the slot
// rows[renumbering[row]]. If insert_diagonal is set, the diagonal entry is
// materialised first: its column is the renumbered row index and its values
// are read from the external-diagonal region of old_vals (stored after the
// regular values, at offset old_rows[num_rows] + row). 'coop' threads
// cooperate per row.
template <int coop, class T>
__global__ void reorder_whole_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal)
{
int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
int coopIdx = threadIdx.x % coop;
while (row < num_rows)
{
INDEX_TYPE src_base = old_rows[row];
INDEX_TYPE dst_base = rows[renumbering[row]];
if (insert_diagonal)
{
// Diagonal goes first in the destination row.
if (coopIdx == 0) { cols[dst_base] = renumbering[row]; }
for (int i = coopIdx; i < bsize; i += coop)
{
vals[dst_base * bsize + i] = old_vals[(old_rows[num_rows] + row) * bsize + i];
}
dst_base++;
}
// Copy the off-diagonal values (bsize scalars per nonzero).
for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop)
{
vals[dst_base * bsize + i] = old_vals[src_base * bsize + i];
}
// Copy the column indices.
for (int i = coopIdx; i < old_rows[row + 1] - src_base; i += coop)
{
cols[dst_base + i] = old_cols[src_base + i];
}
row += blockDim.x * gridDim.x / coop;
}
}
// Overwrite the values of an already-reordered matrix with fresh host-
// supplied values: the diagonal block comes from src_diag_h (one bsize block
// per row, written first in the destination row), the remaining values from
// src_vals_h at the original CSR positions. Structure (rows/cols) is not
// touched. 'coop' threads cooperate per row.
template <int coop, class T>
__global__ void replace_values_matrix(const T *src_vals_h, const T *src_diag_h, const INDEX_TYPE *old_rows, const INDEX_TYPE *rows, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows)
{
int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
int coopIdx = threadIdx.x % coop;
while (row < num_rows)
{
INDEX_TYPE src_base = old_rows[row];
INDEX_TYPE dst_base = rows[renumbering[row]];
// Diagonal block first (mirrors the insert_diagonal layout used when the
// matrix was reordered).
for (int i = coopIdx; i < bsize; i += coop)
{
vals[dst_base * bsize + i] = src_diag_h[row * bsize + i];
}
dst_base++;
// Then the off-diagonal values.
for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop)
{
vals[dst_base * bsize + i] = src_vals_h[src_base * bsize + i];
}
row += blockDim.x * gridDim.x / coop;
}
}
// Overload without a separate diagonal: overwrite the values of an already-
// reordered matrix with fresh values from src_vals_h, using the old CSR
// offsets as source positions and the renumbered offsets as destinations.
// Structure is not touched. 'coop' threads cooperate per row.
template <int coop, class T>
__global__ void replace_values_matrix(const T *src_vals_h, const INDEX_TYPE *old_rows, const INDEX_TYPE *rows, T *vals, const INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows)
{
int row = blockIdx.x * blockDim.x / coop + threadIdx.x / coop;
int coopIdx = threadIdx.x % coop;
while (row < num_rows)
{
INDEX_TYPE src_base = old_rows[row];
INDEX_TYPE dst_base = rows[renumbering[row]];
for (int i = coopIdx; i < old_rows[row + 1]*bsize - src_base * bsize; i += coop)
{
vals[dst_base * bsize + i] = src_vals_h[src_base * bsize + i];
}
row += blockDim.x * gridDim.x / coop;
}
}
//TODO: optimize by vectorizing
// Compact a halo matrix into its new layout, one thread per row: optionally
// insert the diagonal first (column = global_offset + row; values read from
// the external-diagonal region of old_vals), then copy only the valid
// entries (old_cols >= 0), dropping columns that were flagged -1 by the
// index-mapping pass.
template <class T>
__global__ void reorder_whole_halo_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals,
INDEX_TYPE *renumbering, INDEX_TYPE bsize, INDEX_TYPE num_rows, INDEX_TYPE insert_diagonal,
INDEX_TYPE global_offset, INDEX_TYPE local_offset, INDEX_TYPE halo_rows)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
while (row < num_rows)
{
INDEX_TYPE src_base = old_rows[row];
INDEX_TYPE dst = rows[row];
if (insert_diagonal)
{
cols[dst] = global_offset + row;
// Diagonal values live after the regular values, indexed relative to the
// start of this halo block (local_offset within halo_rows total rows).
for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(old_rows[halo_rows - local_offset] + local_offset + row) * bsize + j]; }
dst++;
}
for (int i = 0; i < old_rows[row + 1] - src_base; i++)
{
INDEX_TYPE colIdx = old_cols[src_base + i];
// Negative columns were marked invalid upstream and are skipped.
if (colIdx >= 0)
{
cols[dst] = colIdx;
for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(src_base + i) * bsize + j]; }
dst++;
}
}
row += blockDim.x * gridDim.x;
}
}
// Scatter each row's length into its reordered position (map[row]), adding
// 'insert_diag' extra slots per row when a diagonal will be inserted later.
__global__ void calc_rowlen_reorder(INDEX_TYPE *row_offsets, INDEX_TYPE *row_len, INDEX_TYPE *map, INDEX_TYPE size, INDEX_TYPE insert_diag)
{
    const int stride = blockDim.x * gridDim.x;
    for (int r = blockDim.x * blockIdx.x + threadIdx.x; r < size; r += stride)
    {
        const INDEX_TYPE len = row_offsets[r + 1] - row_offsets[r];
        row_len[map[r]] = len + insert_diag;
    }
}
// Host wrapper: clear the interior flag of every node listed in the B2L map.
template < class TConfig >
void DistributedManagerBase<TConfig>::remove_boundary(IVector_d &flagArray, IVector_d &B2L_map, int size)
{
    const int cta_size = 128;
    const int grid_size = min(4096, (size + cta_size - 1) / cta_size);
    remove_boundary_kernel <<< grid_size, cta_size>>>(flagArray.raw(), B2L_map.raw(), size);
    cudaCheckError();
}
// Host wrapper: flag B2L nodes not yet claimed by any partition boundary
// (see get_unassigned_kernel).
template < class TConfig >
void DistributedManagerBase<TConfig>::get_unassigned(IVector_d &flagArray, IVector_d &B2L_map, IVector_d &partition_flags, int size, int global_size /*, int rank*/)
{
    const int cta_size = 192;
    const int grid_size = min(4096, (size + cta_size - 1) / cta_size);
    get_unassigned_kernel <<< grid_size, cta_size>>>(flagArray.raw(),
            B2L_map.raw(),
            partition_flags.raw(), size, global_size /*, rank*/);
    cudaCheckError();
}
// Host wrapper: number the newly-claimed boundary nodes and rewrite the B2L
// map through the renumbering (see set_unassigned_kernel).
template < class TConfig >
void DistributedManagerBase<TConfig>::set_unassigned(IVector_d &partition_flags, IVector_d &partition_renum, IVector_d &B2L_map, IVector_d &renumbering, int size, int max_element, int global_size /*, int rank*/)
{
    const int cta_size = 192;
    const int grid_size = min(4096, (size + cta_size - 1) / cta_size);
    set_unassigned_kernel <<< grid_size, cta_size>>>(partition_flags.raw(),
            partition_renum.raw(),
            B2L_map.raw(),
            renumbering.raw(),
            size, max_element, global_size /*,rank*/);
    cudaCheckError();
}
// Cache row/nonzero counts for the different matrix views (INTERIOR, OWNED,
// FULL, ALL) from the final row_offsets and halo_offsets. With no neighbors
// every view collapses to the owned part.
template <class TConfig >
inline void DistributedManagerBase<TConfig>::set_initialized(IVector &row_offsets)
{
// For P and R sizes the sizes are fixed at creation
if(m_fixed_view_size)
{
return;
}
if (neighbors.size() > 0)
{
//distributed: cache num_rows/num_nz for different views
_num_rows_interior = _num_interior_nodes;
_num_nz_interior = row_offsets[_num_rows_interior];
_num_rows_owned = _num_interior_nodes + _num_boundary_nodes;
_num_nz_owned = row_offsets[_num_rows_owned];
_num_rows_full = halo_offsets[neighbors.size()];
// Guard against halo offsets that extend past the row_offsets array.
// NOTE(review): this compares a (presumably signed) row count against
// row_offsets.size() -- confirm _num_rows_full can never be negative here.
if (_num_rows_full >= row_offsets.size())
{
_num_nz_full = row_offsets[row_offsets.size() - 1];
}
else
{
_num_nz_full = row_offsets[_num_rows_full];
}
_num_rows_all = halo_offsets[halo_offsets.size() - 1];
_num_nz_all = _num_nz_full;
}
else
{
// Single partition: all views are identical to the owned view.
_num_rows_interior = _num_interior_nodes;
_num_nz_interior = row_offsets[_num_rows_interior];
_num_rows_owned = _num_interior_nodes;
_num_nz_owned = row_offsets[_num_rows_owned];
_num_rows_full = _num_rows_owned;
_num_nz_full = _num_nz_owned;
_num_rows_all = _num_rows_owned;
_num_nz_all = _num_nz_owned;
}
}
// Host-vector overload: forwards to the shared createAggRenumbering
// implementation below.
template <class TConfig >
void DistributedManagerBase<TConfig>::createAggregatesRenumbering(IVector_h &renumbering, IVector_h_vector &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings)
{
createAggRenumbering(renumbering, B2L_maps, size, num_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings);
}
// Device-vector overload: forwards to the shared createAggRenumbering
// implementation below.
template <class TConfig >
void DistributedManagerBase<TConfig>::createAggregatesRenumbering(IVector_d &renumbering, IVector_d_vector &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings)
{
createAggRenumbering(renumbering, B2L_maps, size, num_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings);
}
// Build a renumbering that places interior aggregates first, then boundary
// aggregates in per-neighbor order. Outputs the counts of each through the
// reference parameters. Only num_rings == 1 is supported.
template <class TConfig >
template <class IVector_hd>
void DistributedManagerBase<TConfig>::createAggRenumbering(IVector_hd &renumbering, std::vector<IVector_hd> &B2L_maps, int size, int num_neighbors, int &num_interior_aggregates, int &num_boundary_aggregates, int num_rings)
{
if (num_rings != 1)
{
FatalError("num_rings > 1 not supported in consolidation", AMGX_ERR_NOT_IMPLEMENTED);
}
//int num_neighbors = this->neighbors.size();
// No neighbors: identity case, everything is interior.
if (num_neighbors == 0)
{
num_boundary_aggregates = 0;
num_interior_aggregates = size;
return;
}
//initial size to size+1 so we have the total size after a scan
int global_size = size;
renumbering.resize(size + 1);
//
// Step 1 - in the main matrix, separate interior and boundary nodes (1/0 in flagArray), renumber interior ones with an exclusive scan
//
IVector_hd flagArray(size + 1);
thrust::fill(flagArray.begin(), flagArray.begin() + size + 1, 1);
cudaCheckError();
//sets 1 for interior nodes, 0 for boundary node
for (int i = 0; i < num_neighbors; i++ )
{
// NOTE: this 'size' intentionally shadows the parameter; the original
// value is preserved in global_size above.
int size = B2L_maps[i].size();
remove_boundary(flagArray, B2L_maps[i], size);
}
//gets the renumbering of interior nodes
thrust::exclusive_scan(flagArray.begin(), flagArray.begin() + size + 1, renumbering.begin());
cudaCheckError();
//
// Step 2 - Renumber nodes that are in the boundary, stepping through each B2L map, and renumbering ones that have not been renumbered yet
//
//what is the biggest B2L size
INDEX_TYPE max_size = 0;
for (int i = 0; i < num_neighbors; i++)
{
max_size = max_size > B2L_maps[i].size() ? max_size : B2L_maps[i].size();
}
//allocate work vectors (should be pretty small)
IVector_hd partition_flags(max_size);
IVector_hd partition_renum(max_size);
//the number of renumbered nodes so far
int max_element = renumbering[size];
num_interior_aggregates = max_element;
num_boundary_aggregates = size - max_element;
renumbering.resize(size);
for (int i = 0; i < num_neighbors; i++)
{
//find nodes that are part of the current boundary and they haven't been renumbered yet
thrust::fill(partition_flags.begin(), partition_flags.begin() + max_size, 0);
int size = B2L_maps[i].size();
get_unassigned(flagArray, B2L_maps[i], partition_flags, size, global_size/*,0*/);
//calculate the local renumbering (within this boundary region) of these nodes
thrust::exclusive_scan(partition_flags.begin(), partition_flags.begin() + max_size, partition_renum.begin());
//apply renumbering to the big numbering table
set_unassigned(partition_flags, partition_renum, B2L_maps[i], renumbering, size, max_element, global_size/*,0*/);
//update the number of renumbered nodes
// (exclusive scan last element + last flag = total newly assigned)
max_element += partition_renum[max_size - 1] + partition_flags[max_size - 1];
}
cudaCheckError();
}
// Construct a manager bound to matrix 'a': wires the reference members to
// their backing storage, creates the communication event/streams, and sets up
// the comms layer (global id / partition count).
// NOTE(review): the cudaEventCreate/cudaStreamCreateWithFlags return codes
// are not checked here -- failures would surface later as sticky errors.
template <class TConfig>
inline DistributedManagerBase<TConfig>::DistributedManagerBase(Matrix<TConfig> &a) :
m_fine_level_comms(NULL), A(&a), m_pinned_buffer_size(0), m_pinned_buffer(NULL), _num_interior_nodes(0), _num_boundary_nodes(0), _comms(NULL), has_B2L(false),
neighbors(_neighbors), B2L_maps(_B2L_maps), L2H_maps(_L2H_maps), B2L_rings(_B2L_rings),
halo_rows_ref_count(0), halo_btl_ref_count(0), halo_ranges(_halo_ranges), halo_ranges_h(_halo_ranges_h), part_offsets(_part_offsets), part_offsets_h(_part_offsets_h), halo_rows(NULL), halo_btl(NULL), m_is_root_partition(false), m_is_glued(false), m_is_fine_level_glued(false), m_is_fine_level_consolidated(false), m_is_fine_level_root_partition(false), m_use_cuda_ipc_consolidation(false), m_fixed_view_size(false)
{
cudaEventCreate(&comm_event);
// Non-blocking streams so interior/boundary work can overlap the default stream.
cudaStreamCreateWithFlags(&m_int_stream, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&m_bdy_stream, cudaStreamNonBlocking);
this->createComms(A->getResources());
int my_id = this->getComms()->get_global_id();
int num_parts = this->getComms()->get_num_partitions();
this->set_global_id(my_id);
this->set_num_partitions(num_parts);
}
// Generate this rank's portion of a 7-point Poisson matrix on an
// nx*ny*nz-per-rank grid, with ranks arranged in a P x Q x R processor grid.
// Fills A (CSR + external diagonal), the local-to-global map for halo nodes,
// and the partition offsets (every rank owns the same number of rows).
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::generatePoisson7pt(int nx, int ny, int nz, int P, int Q, int R)
{
int my_id = this->getComms()->get_global_id();
int p, q, r;
if (nx < P || ny < Q || nz < R)
{
FatalError("(nx < P) or (ny < Q) or (nz < R) not supported\n", AMGX_ERR_NOT_IMPLEMENTED);
}
/* compute p,q,r from P,Q,R and myid */
p = my_id % P; // Position in x direction
q = (( my_id - p) / P) % Q; // Position in y
r = ( my_id - p - P * q) / ( P * Q ); // Position in z
// Create A.row_indices, A.col_indices, A.values, A.diag
int num_rows = nx * ny * nz;
int num_nonzeros = num_rows * 7; // Ignoring any boundary, 7 nnz per row
// Subtract the stencil arms that fall outside the global domain (one face
// worth of entries per touching domain boundary).
int num_substract = 0;
if (p == 0) { num_substract += ny * nz; }
if (p == P - 1) { num_substract += ny * nz; }
if (q == 0) { num_substract += nx * nz; }
if (q == Q - 1) { num_substract += nx * nz; }
if (r == 0) { num_substract += nx * ny; }
if (r == R - 1) { num_substract += nx * ny; }
num_nonzeros -= num_substract;
// Halo nodes: one per face neighbor cell, minus the faces on the domain boundary.
int num_halo_nodes = 2 * (ny * nz + nx * nz + nx * ny) - num_substract;
this->local_to_global_map.resize(num_halo_nodes);
this->A->set_initialized(0);
this->A->resize(0, 0, 0, 1, 1, 1);
this->A->addProps(CSR);
this->A->resize(num_rows, num_rows + num_halo_nodes, num_nonzeros, 1, 1, 1);
// Pass 1: per-row lengths, then exclusive scan into row offsets.
const int cta_size = 128;
const int grid_size = std::min( 4096, (num_rows + cta_size - 1) / cta_size );
poisson7pt_count_row_len <<< grid_size, cta_size>>>(this->A->row_offsets.raw(), nx, ny, nz, p, q, r, P, Q, R, num_rows);
thrust_wrapper::exclusive_scan(this->A->row_offsets.begin(), this->A->row_offsets.end(), this->A->row_offsets.begin());
cudaCheckError();
// Now set nonzeros columns and values
// TODO: vectorize this
const int grid_size2 = std::min( 4096, (num_rows + cta_size - 1) / cta_size );
poisson7pt_set_col_values <<< grid_size2, cta_size>>>
(this->A->row_offsets.raw(),
this->A->col_indices.raw(),
this->A->values.raw(),
this->A->diag.raw(),
this->local_to_global_map.raw(),
nx, ny, nz,
p, q, r,
P, Q, R,
num_rows);
cudaCheckError();
// fill parts_offsets_h
// All ranks have same number of nodes
int num_ranks = P * Q * R;
this->part_offsets_h.resize(num_ranks + 1);
this->part_offsets_h[0] = (int64_t) 0;
for (int i = 1; i < num_ranks + 1; i++)
{
this->part_offsets_h[i] = this->part_offsets_h[i - 1] + (int64_t) num_rows;
}
// Device to host copy
this->part_offsets = this->part_offsets_h;
this->num_rows_global = P * Q * R * nx * ny * nz;
// this->A->set_initialized(1);
}
// Mirror the caller-supplied partition offsets into the manager's host array,
// push a copy to the device, and record the global row count.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_SetOffsets(
    int num_ranks, int num_rows_global, const t_colIndex* partition_offsets)
{
    // num_ranks + 1 offsets: one boundary per rank plus the final end marker.
    this->part_offsets_h.resize(num_ranks + 1);
    for (int rank = 0; rank <= num_ranks; rank++)
    {
        this->part_offsets_h[rank] = partition_offsets[rank];
    }
    // Host -> device copy of the offsets.
    this->part_offsets = this->part_offsets_h;
    this->num_rows_global = num_rows_global;
    cudaCheckError();
}
// Given the (unsorted, possibly duplicated) global indices of all off-
// diagonal columns, build the local<->global mappings: local ids start at
// num_rows and follow the sorted order of the unique global columns. Uploads
// local_to_global_map to the device and returns the global->local map.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
map<t_colIndex, int> DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_LocalToGlobal(int num_rows, I64Vector_h &off_diag_cols)
{
// sort global column indices
thrust::sort(off_diag_cols.begin(), off_diag_cols.end());
// find unique columns and set local <-> global mappings
// 1) Removed unneeded vector 2) Create map on host first, upload later (less thrust calls)
I64Vector_h local_to_global_h;
map<t_colIndex, int> global_to_local; // temporary
// First element seeds the maps; subsequent duplicates are skipped by the
// inequality test below (input is sorted, so duplicates are adjacent).
if (off_diag_cols.size() > 0)
{
global_to_local[off_diag_cols[0]] = num_rows;
local_to_global_h.push_back(off_diag_cols[0]);
}
for (int i = 1; i < off_diag_cols.size(); i++)
{
if (off_diag_cols[i] != off_diag_cols[i - 1])
{
global_to_local[off_diag_cols[i]] = num_rows + local_to_global_h.size();
local_to_global_h.push_back(off_diag_cols[i]);
}
}
// Upload finished map in one piece
this->local_to_global_map.resize(local_to_global_h.size());
thrust::copy(local_to_global_h.begin(), local_to_global_h.end(), this->local_to_global_map.begin());
return global_to_local;
}
// Initialise the local matrix A from already-localised column indices plus
// the caller's CSR arrays. Columns span num_rows owned plus the halo columns
// recorded in local_to_global_map. An explicit external diagonal is copied
// into the DIAG region when 'diag' is provided; otherwise the diagonal is
// located inside the CSR structure.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributed_InitLocalMatrix(
IVector_h local_col_indices,
int num_rows,
int num_nonzeros,
const int block_dimx,
const int block_dimy,
const int *row_offsets,
const mat_value_type *values,
const void *diag)
{
// init local matrix
this->A->set_initialized(0);
this->A->resize(0, 0, 0, 1, 1, 1);
this->A->addProps(CSR);
if (diag)
{
this->A->addProps(DIAG);
}
this->A->resize(num_rows, num_rows + this->local_to_global_map.size(), num_nonzeros, block_dimx, block_dimy, 1);
cudaCheckError();
// set local matrix
thrust::copy(row_offsets, row_offsets + num_rows + 1, this->A->row_offsets.begin());
this->A->col_indices = local_col_indices;
thrust::copy(values, values + num_nonzeros * block_dimx * block_dimy, this->A->values.begin());
cudaCheckError();
// setup diagonal
if (diag)
{
// cudaMemcpyDefault lets the runtime infer the direction of the copy.
cudaMemcpy(this->A->values.raw() + this->A->diagOffset()*this->A->get_block_size(), diag, sizeof(mat_value_type) * num_rows * block_dimx * block_dimy, cudaMemcpyDefault);
}
else
{
this->A->computeDiagonal();
}
cudaCheckError();
}
// Load this rank's part of a distributed matrix described by a per-row
// partition vector (row -> owning rank). Builds partition offsets and the
// global permutation implied by the partition vector, classifies columns as
// diagonal (owned) or off-diagonal (halo), maps them to local indices, and
// initialises the local matrix.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrixPartitionVec(
int num_rows,
int num_nonzeros,
const int block_dimx,
const int block_dimy,
const int *row_offsets,
const t_colIndex *col_indices,
const mat_value_type *values,
int num_ranks,
int num_rows_global,
const void *diag,
const int *partition)
{
// fetch my rank
int my_id = this->getComms()->get_global_id();
// setup partition vector
IVector_h partitionVec(num_rows_global);
if (partition == NULL)
{
// No partition info given: assume contiguous block partitioning, with
// block sizes gathered from every rank's local row count.
IVector_h rowCounts(num_ranks);
this->getComms()->all_gather(num_rows, rowCounts, 1);
int p = 0;
for (int i = 0; i < num_ranks; ++i)
{
for (int j = 0; j < rowCounts[i]; ++j)
{
partitionVec[p++] = i;
}
}
}
else
{
// use existing partition info
for (int i = 0; i < num_rows_global; i++)
{
partitionVec[i] = partition[i];
}
}
// compute partition offsets (based on number of elements per partition). Will be modified when calculating partition map.
t_colIndex *partition_offsets = (t_colIndex *)calloc(num_ranks + 1, sizeof(t_colIndex));
for (int i = 0; i < num_rows_global; i++)
{
int pvi = partitionVec[i];
partition_offsets[pvi + 1]++;
}
thrust::inclusive_scan(partition_offsets, partition_offsets + num_ranks + 1, partition_offsets);
loadDistributed_SetOffsets(num_ranks, num_rows_global, partition_offsets);
// compute partition map (which tells you how the global elements are mapped into the partitions)
t_colIndex *partition_map = (t_colIndex *)calloc(num_rows_global, sizeof(t_colIndex));
for (int i = 0; i < num_rows_global; i++)
{
int pvi = partitionVec[i];
t_colIndex poi = partition_offsets[pvi];
partition_map[poi] = i;
// partition_offsets doubles as a running cursor here (hence "will be
// modified" above).
partition_offsets[pvi]++;
}
free(partition_offsets);
// compute the inverse partition map
t_colIndex *ipartition_map = (t_colIndex *)calloc(num_rows_global, sizeof(t_colIndex));
for (int i = 0; i < num_rows_global; i++)
{
ipartition_map[partition_map[i]] = i;
}
free(partition_map);
int h_cidx_allocated = 0;
// Make sure the global column indices are readable from the host.
const t_colIndex *h_col_indices_global = (const t_colIndex *)this->getHostPointerForData(col_indices, num_nonzeros * sizeof(t_colIndex), &h_cidx_allocated);
// gather all off-diag columns
I64Vector_h off_diag_cols;
for (int i = 0; i < num_nonzeros; i++)
{
if (partitionVec[h_col_indices_global[i]] != my_id)
{
off_diag_cols.push_back(ipartition_map[h_col_indices_global[i]]);
}
}
auto global_to_local = loadDistributed_LocalToGlobal<t_colIndex>(num_rows, off_diag_cols);
// set 1, then scan to compute local row indices
IVector_h my_indices(num_rows_global);
for (int i = 0; i < num_nonzeros; i++)
{
if (partitionVec[h_col_indices_global[i]] == my_id) // find my local columns and set to 1
{
my_indices[ipartition_map[h_col_indices_global[i]]] = 1;
}
}
thrust::exclusive_scan(my_indices.begin(), my_indices.end(), my_indices.begin());
// remap colums to local
IVector_h local_col_indices(num_nonzeros);
for (int i = 0; i < num_nonzeros; i++)
{
if (partitionVec[h_col_indices_global[i]] != my_id)
{
// off-diag
local_col_indices[i] = global_to_local[ipartition_map[h_col_indices_global[i]]];
}
else
{
// diag
local_col_indices[i] = my_indices[ipartition_map[h_col_indices_global[i]]];
}
}
free(ipartition_map);
loadDistributed_InitLocalMatrix(local_col_indices, num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, values, diag);
cudaCheckError();
// don't free possibly allocated pinned buffer, since it could be used later. if it would not - it would be deallocated automatically
/*if (h_cidx_allocated)
{
free((void*)h_col_indices_global);
}*/
}
// Load this rank's part of a distributed matrix described by contiguous
// partition offsets (rank i owns global rows [offsets[i], offsets[i+1])).
// Classifies columns as diagonal (inside my offset range) or off-diagonal
// (halo), maps them to local indices, and initialises the local matrix.
// Cheaper than the partition-vector path: no O(num_rows_global) permutation
// arrays are required.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrixPartitionOffsets(
int num_rows,
int num_nonzeros,
const int block_dimx,
const int block_dimy,
const int *row_offsets,
const t_colIndex *col_indices,
const mat_value_type *values,
int num_ranks,
int num_rows_global,
const void *diag,
const t_colIndex *partition_offsets)
{
// fetch my rank
int my_id = this->getComms()->get_global_id();
// sanity check, cheap to perform, and helps prevent harder-to-debug errors later on
if (!std::is_sorted(partition_offsets, partition_offsets + num_ranks + 1)) {
FatalError("Partition offsets are not sorted.", AMGX_ERR_BAD_PARAMETERS);
}
loadDistributed_SetOffsets(num_ranks, num_rows_global, partition_offsets);
// Create predicate to determine if a column is in the local diagonal block
t_colIndex my_first_col = this->part_offsets_h[my_id];
t_colIndex one_past_my_last_col = this->part_offsets_h[my_id + 1];
auto in_local_diagonal_block = [my_first_col, one_past_my_last_col](const t_colIndex col_index) {
return col_index >= my_first_col && col_index < one_past_my_last_col;
};
int h_cidx_allocated = 0;
// Make sure the global column indices are readable from the host.
const t_colIndex *h_col_indices_global = (const t_colIndex *)this->getHostPointerForData(col_indices, num_nonzeros * sizeof(t_colIndex), &h_cidx_allocated);
// gather all off-diag columns
I64Vector_h off_diag_cols;
for (int i = 0; i < num_nonzeros; i++)
{
if (!in_local_diagonal_block(h_col_indices_global[i]))
{
off_diag_cols.push_back(h_col_indices_global[i]);
}
}
auto global_to_local = loadDistributed_LocalToGlobal<t_colIndex>(num_rows, off_diag_cols);
// set 1, then scan to compute local row indices
// "coordinate-shift" columns so they lie in much smaller range of my diagonal indices
int diagonal_size = this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id];
IVector_h my_indices(diagonal_size);
for (int i = 0; i < num_nonzeros; i++)
{
t_colIndex col_index = h_col_indices_global[i];
if (in_local_diagonal_block(h_col_indices_global[i])) // find my local columns and set to 1
{
// olumns that are on *my* diag partition cannot have an index from 0..num_rows_global
// instead, part_offsets_h[my_id] <= col_index < part_offsets[my_id+1]
col_index -= this->part_offsets_h[my_id];
my_indices[col_index] = 1;
}
}
thrust::exclusive_scan(my_indices.begin(), my_indices.end(), my_indices.begin());
// remap colums to local
IVector_h local_col_indices(num_nonzeros);
for (int i = 0; i < num_nonzeros; i++)
{
t_colIndex col_index = h_col_indices_global[i];
if (!in_local_diagonal_block(col_index))
{
// off-diag
local_col_indices[i] = global_to_local[col_index];
}
else
{
// diag
col_index -= this->part_offsets_h[my_id];
local_col_indices[i] = my_indices[col_index];
}
}
loadDistributed_InitLocalMatrix(local_col_indices, num_rows, num_nonzeros, block_dimx, block_dimy, row_offsets, values, diag);
}
// Entry point for loading a distributed matrix: dispatch on how the caller
// encoded the partitioning information (per-row partition vector vs.
// contiguous partition offsets).
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrix(
    int num_rows,
    int num_nonzeros,
    const int block_dimx,
    const int block_dimy,
    const int *row_offsets,
    const t_colIndex *col_indices,
    const mat_value_type *values,
    int num_ranks,
    int num_rows_global,
    const void *diag,
    const MatrixDistribution &dist)
{
    using PI = MatrixDistribution::PartitionInformation;
    const auto style = dist.getPartitionInformationStyle();
    if (style == PI::PartitionVec)
    {
        loadDistributedMatrixPartitionVec(num_rows, num_nonzeros, block_dimx, block_dimy,
            row_offsets, col_indices, values, num_ranks, num_rows_global, diag, (const int*) dist.getPartitionData());
    }
    else if (style == PI::PartitionOffsets)
    {
        loadDistributedMatrixPartitionOffsets(num_rows, num_nonzeros, block_dimx, block_dimy,
            row_offsets, col_indices, values, num_ranks, num_rows_global, diag, (const t_colIndex*) dist.getPartitionData());
    }
    else
    {
        FatalError("Unsupported partitioning data format used with loadDistributedMatrix", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Host specialisation: one-ring renumbering is device-only; fail loudly.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::renumberMatrixOneRing(int update_neighbours)
{
FatalError("Distributed classical AMG not implemented on host", AMGX_ERR_NOT_IMPLEMENTED);
}
// Renumber the matrix for one-ring halo exchanges: discover/update neighbors,
// build B2L/L2H maps and halo offsets, reorder the owned rows, and rebuild
// local_to_global_map by exchanging global column indices with the neighbors.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::renumberMatrixOneRing(int update_neighbours)
{
// Step 1: Using halo_ranges, flag neighbors and at the same time, flag halo_nodes (flag_halo_nodes_local)
int my_id = this->global_id();
int num_parts = this->get_num_partitions();
this->set_base_index(this->part_offsets_h[my_id]);
this->set_index_range(this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]);
DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
// Create/update list of neighbors
if (update_neighbours)
{
typedef typename TConfig::template setVecPrec<AMGX_vecInt64>::Type i64vec_value_type;
typedef Vector<i64vec_value_type> I64Vector;
typedef typename Matrix<TConfig>::MVector MVector;
// Exchange halo rows (global indices) to discover any new neighbors.
std::vector<IVector> halo_row_offsets(this->neighbors.size());
std::vector<I64Vector> halo_global_indices(this->neighbors.size());
std::vector<MVector> halo_values(this->neighbors.size());
prep->create_halo_rows_global_indices(*(this->A), halo_row_offsets, halo_global_indices, halo_values);
prep->update_neighbors_list(*(this->A), this->neighbors, this->halo_ranges_h, this->halo_ranges, this->part_offsets_h, this->part_offsets, halo_row_offsets, halo_global_indices);
}
else
{
prep->create_neighbors_v2(*(this->A));
}
this->getComms()->set_neighbors(this->neighbors.size());
// Create B2L_maps and L2H_maps
prep->create_boundary_lists_v3(*(this->A));
// halo_offsets
int neighbors = this->A->manager->num_neighbors();
int A_num_rows, offset;
this->A->getOffsetAndSizeForView(OWNED, &offset, &A_num_rows);
// Halo rows of neighbor i occupy [halo_offsets[i], halo_offsets[i+1]),
// starting right after the owned rows.
this->halo_offsets.resize(neighbors + 1, 0);
this->halo_offsets[0] = A_num_rows;
for (int i = 0; i < neighbors; i++)
{
this->halo_offsets[i + 1] = this->halo_offsets[i] + this->B2L_maps[i].size();
}
this->getComms()->exchange_vectors(this->A->manager->B2L_maps, *(this->A), 0);
// Initialize B2L_rings
int num_neighbors = this->neighbors.size();
this->B2L_rings.resize(num_neighbors);
for (int i = 0; i < num_neighbors; i++)
{
// Single ring: [0, B2L_maps[i].size()).
this->B2L_rings[i].resize(2);
this->B2L_rings[i][0] = 0;
this->B2L_rings[i][1] = this->B2L_maps[i].size();
}
prep->initialize_B2L_maps_offsets(*(this->A), 1);
delete prep;
//Use the exchanged halo row matrices and the boundary/halo index lists to renumber and consolidate the matrix
// Step 5: renumber all owned rows and columns
this->reorder_matrix_owned();
// Step 6: renumber local_to_global_map
int num_owned_rows = this->A->manager->halo_offsets[0];
int size_one_ring;
this->A->getOffsetAndSizeForView(FULL, &offset, &size_one_ring);
// Owned rows get consecutive global ids starting at base_index; the halo
// part is then filled by exchanging these ids with the neighbors.
I64Vector_d global_col_indices(size_one_ring);
thrust::sequence(global_col_indices.begin(), global_col_indices.begin() + num_owned_rows, this->base_index() );
cudaCheckError();
global_col_indices.dirtybit = 1;
this->exchange_halo(global_col_indices, global_col_indices.tag);
thrust_wrapper::copy(global_col_indices.begin() + num_owned_rows, global_col_indices.begin() + size_one_ring, this->local_to_global_map.begin(), this->get_int_stream(), true);
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
// Host-backend specialization of renumber_P_R.
// Distributed classical AMG renumbering of the P/R operators is only
// implemented for the device backend; this stub always aborts with
// AMGX_ERR_NOT_IMPLEMENTED.
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::renumber_P_R(Matrix_h &P, Matrix_h &R, Matrix_h &A_fine)
{
FatalError("Distributed classical AMG not implemented on host", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
// Device-backend renumber_P_R.
// After the coarse matrix Ac (this->A) has been renumbered via this->renumbering,
// make P and R consistent with the new coarse numbering:
//   - renumber the owned column indices of P,
//   - renumber P's B2L maps,
//   - reorder the owned rows of R (row permutation; columns unchanged),
//   - refresh P's local_to_global_map by exchanging new owned indices with neighbors.
// P, R: prolongation/restriction operators tied to this coarse level.
// A_fine: fine-level matrix, used only for its owned-row count.
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::renumber_P_R(Matrix_d &P, Matrix_d &R, Matrix_d &A_fine)
{
int cta_size = 256;
int num_owned_fine_pts = A_fine.manager->halo_offsets[0];
int num_owned_coarse_pts, offset;
// matrix Ac
this->A->getOffsetAndSizeForView(OWNED, &offset, &num_owned_coarse_pts);
// Renumber the owned col indices of P (not the halo columns, since P.manager was created assuming some other numbering)
int nnz_owned_fine_pts = P.row_offsets[num_owned_fine_pts];
int num_blocks_fine = min(4096, (nnz_owned_fine_pts + cta_size - 1) / cta_size);
if (num_blocks_fine > 0)
{
renumber_P_col_indices <<< num_blocks_fine, cta_size>>>(P.col_indices.raw(), this->renumbering.raw(), num_owned_coarse_pts, nnz_owned_fine_pts);
cudaCheckError();
}
// Renumber the B2L_maps of P (gather renumbering through each map in place)
for (int i = 0; i < P.manager->neighbors.size(); i++)
{
thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), P.manager->B2L_maps[i].begin()),
thrust::make_permutation_iterator(this->renumbering.begin(), P.manager->B2L_maps[i].end()),
P.manager->B2L_maps[i].begin());
}
cudaCheckError();
// Don't renumber the L2H_maps or the halo
// Renumber the local_to_global_map of matrix P (since neighbors renumbered their owned rows)
// Swap owned rows of R
IVector new_row_offsets(R.row_offsets.size());
int insert = 0;
// Only renumber the owned rows
int num_blocks_owned = min(4096, (num_owned_coarse_pts + cta_size - 1) / cta_size);
if (num_blocks_owned > 0)
{
// Scatter each owned row's length to its new (renumbered) position.
calc_rowlen_reorder <<< num_blocks_owned, cta_size >>>(R.row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), num_owned_coarse_pts, insert);
cudaCheckError();
}
// Row lengths -> row offsets for the owned part.
thrust_wrapper::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + num_owned_coarse_pts + 1, new_row_offsets.begin());
cudaCheckError();
// Copy the row_offsets for halo rows
thrust::copy(R.row_offsets.begin() + num_owned_coarse_pts, R.row_offsets.end(), new_row_offsets.begin() + num_owned_coarse_pts);
cudaCheckError();
// Reorder the rows of R (no need to reorder the column indices)
int new_nnz = new_row_offsets[new_row_offsets.size() - 1];
int halo_offset = new_row_offsets[num_owned_coarse_pts];
typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA;
VVector new_values(new_nnz * R.get_block_size(), types::util< ValueTypeA >::get_zero());
IVector new_col_indices(new_nnz, 0);
int num_blocks_total = min(4096, (R.get_num_rows() + cta_size - 1) / cta_size);
if (num_blocks_total > 0)
{
// NOTE(review): grid size is computed with cta_size = 256 but the launch uses
// 512 threads per block — presumably reorder_R_matrix<32> strides internally;
// confirm against the kernel definition.
reorder_R_matrix <32> <<< num_blocks_total, 512>>>(R.row_offsets.raw(), R.col_indices.raw(), R.values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), R.get_block_size(), R.get_num_rows(), num_owned_coarse_pts);
cudaCheckError();
}
R.col_indices.swap(new_col_indices);
R.row_offsets.swap(new_row_offsets);
R.values.swap(new_values);
// Renumber the local_to_global_map (since neighbors have changed their owned numbering)
if (P.manager->neighbors.size() != 0)
{
int size_one_ring = P.manager->halo_offsets[P.manager->neighbors.size()];
I64Vector_d global_col_indices(size_one_ring);
// Owned entries get this partition's new global indices; halo entries are
// filled by the exchange below with the neighbors' renumbered indices.
thrust::sequence(global_col_indices.begin(), global_col_indices.begin() + num_owned_coarse_pts, this->base_index());
cudaCheckError();
global_col_indices.dirtybit = 1;
P.manager->exchange_halo(global_col_indices, global_col_indices.tag);
thrust_wrapper::copy(global_col_indices.begin() + num_owned_coarse_pts, global_col_indices.begin() + size_one_ring, P.manager->local_to_global_map.begin(), this->get_int_stream(), true);
cudaCheckError();
}
DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
prep->initialize_B2L_maps_offsets(P, 1);
delete prep;
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
// Build all data needed for 1-ring halo exchanges.
//
// Input:
//   A matrix with N rows whose column indices are local indices in [0, N+M),
//   where M is the number of 1-ring halo vertices. The matrix carries
//   "local_to_global_map" of size M (global id of each halo index: a column
//   with index N+K, 0 <= K < M, has global id local_to_global_map[K]) and the
//   part_offsets_h / part_offsets arrays (where each partition begins).
// Output:
//   1-ring neighbors list, 1-ring B2L_maps and halo_offsets; halo indices are
//   reordered per-neighbor so exchange_halo works unchanged (L2H = identity).
//
// Fix vs. previous revision: removed the unused local
//   int num_parts = this->get_num_partitions();
// (a pure query whose result was never read).
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingB2Lmaps()
{
    int my_id = this->global_id();
    // This partition owns the global index range [part_offsets_h[my_id], part_offsets_h[my_id+1]).
    this->set_base_index(this->part_offsets_h[my_id]);
    this->set_index_range(this->part_offsets_h[my_id + 1] - this->part_offsets_h[my_id]);
    DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
    // Creates the "neighbors" array: partitions to which data needs to be sent
    // and/or from which data is received.
    prep->create_neighbors_v2(*(this->A));
    // Update the communicator in case some partitions have no neighbors.
    this->getComms()->set_neighbors(this->neighbors.size());
    prep->create_B2L_one_ring(*(this->A));
    delete prep;
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
// Fetch the 1-ring halo rows and build the 2-ring metadata.
//
// Input:  a matrix with 1-ring B2L_maps and 1-ring halo_offsets.
// Output: the matrix gains its 1-ring rows plus 2-ring B2L_maps,
//         2-ring halo_offsets and 2-ring neighbors.
// (For the related machinery see create_B2L_from_maps, which chains
// create_rings / create_halo_btl / create_halo_rows / exchange_matrix_halo.)
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingHaloRows()
{
    DistributedArranger<TConfig_d> *arranger = new DistributedArranger<TConfig_d>;
    arranger->create_one_ring_halo_rows(*(this->A));
    // Believed removable now that masked SpMV is no longer used; kept for safety.
    arranger->createRowsLists(*(this->A), false);
    delete arranger;
    // Matrix reordering was only needed for latency hiding, which is disabled;
    // re-enable if latency hiding ever comes back:
    //this->reorder_matrix();
}
template <class TConfig>
// Constructor used for fine-level consolidation: wraps matrix `a` and copies an
// externally-provided neighbors list. Only supports a single halo ring / import
// ring; anything else aborts.
// Note the reference members (neighbors, halo_ranges, ..., B2L_rings) are bound
// to the corresponding underscore-prefixed storage members, so e.g. `neighbors`
// below aliases `_neighbors`.
inline DistributedManagerBase<TConfig>::DistributedManagerBase(
Matrix<TConfig> &a,
INDEX_TYPE allocated_halo_depth,
INDEX_TYPE num_import_rings,
int num_neighbors,
const VecInt_t *neighbors_) : m_fine_level_comms(NULL), A(&a), m_pinned_buffer_size(0), m_pinned_buffer(NULL), _num_interior_nodes(0), _num_boundary_nodes(0), _comms(NULL), has_B2L(false), neighbors(_neighbors), halo_rows_ref_count(0), halo_rows(NULL), halo_btl_ref_count(0), halo_btl(NULL), halo_ranges(_halo_ranges), halo_ranges_h(_halo_ranges_h), part_offsets(_part_offsets), part_offsets_h(_part_offsets_h),
B2L_maps(_B2L_maps), L2H_maps(_L2H_maps), B2L_rings(_B2L_rings), m_is_root_partition(false), m_is_glued(false), m_is_fine_level_glued(false), m_is_fine_level_consolidated(false), m_is_fine_level_root_partition(false), m_use_cuda_ipc_consolidation(false), m_fixed_view_size(false)
{
// Dedicated non-blocking streams for interior / boundary work.
// NOTE(review): return codes of the stream creations are not checked here —
// consider adding cudaCheckError() for consistency with the rest of the file.
cudaStreamCreateWithFlags(&m_int_stream, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&m_bdy_stream, cudaStreamNonBlocking);
if (num_import_rings != 1)
{
FatalError("num_rings > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED);
}
if (allocated_halo_depth != 1)
{
FatalError("allocated_halo_depth > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED);
}
this->set_num_halo_rings(num_import_rings);
// neighbors_ may be a host or device pointer; cudaMemcpyDefault handles both.
neighbors.resize(num_neighbors);
cudaMemcpy(neighbors.raw(), neighbors_, num_neighbors * sizeof(VecInt_t), cudaMemcpyDefault);
cudaCheckError();
}
template <class TConfig>
// Snapshot the flattened B2L/L2H maps into per-neighbor cached vectors.
// b2l_maps / l2h_maps hold all entries back-to-back; b2l_ptrs / l2h_ptrs are
// CSR-style offsets of length num_neighbors+1 delimiting each neighbor's slice.
inline void DistributedManagerBase<TConfig>::cacheMaps(const VecInt_t *b2l_maps, const VecInt_t *b2l_ptrs, const VecInt_t *l2h_maps, const VecInt_t *l2h_ptrs)
{
    const int nbr_count = this->neighbors.size();
    this->cached_B2L_maps.resize(nbr_count);
    this->cached_L2H_maps.resize(nbr_count);
    for (int nbr = 0; nbr < nbr_count; ++nbr)
    {
        // Boundary-to-local slice for this neighbor.
        this->cached_B2L_maps[nbr].resize(b2l_ptrs[nbr + 1] - b2l_ptrs[nbr]);
        for (int src = b2l_ptrs[nbr], dst = 0; src < b2l_ptrs[nbr + 1]; ++src, ++dst)
        {
            this->cached_B2L_maps[nbr][dst] = b2l_maps[src];
        }
        // Local-to-halo slice for this neighbor.
        this->cached_L2H_maps[nbr].resize(l2h_ptrs[nbr + 1] - l2h_ptrs[nbr]);
        for (int src = l2h_ptrs[nbr], dst = 0; src < l2h_ptrs[nbr + 1]; ++src, ++dst)
        {
            this->cached_L2H_maps[nbr][dst] = l2h_maps[src];
        }
    }
}
template <class TConfig>
// Cache the current (1-ring) B2L and L2H maps verbatim so they can be
// re-applied later (e.g. after the matrix is re-uploaded and reordered).
inline void DistributedManagerBase<TConfig>::cacheMapsOneRing()
{
    const int nbr_count = this->neighbors.size();
    this->cached_B2L_maps.resize(nbr_count);
    this->cached_L2H_maps.resize(nbr_count);
    for (int nbr = 0; nbr < nbr_count; ++nbr)
    {
        this->cached_B2L_maps[nbr] = this->B2L_maps[nbr];
        this->cached_L2H_maps[nbr] = this->L2H_maps[nbr];
    }
}
template <class TConfig>
// Cache externally-supplied 1-ring B2L/L2H maps.
// b2l_maps / l2h_maps are arrays of num_neighbors pointers (each possibly a
// device pointer), b2l_sizes / l2h_sizes give each map's length; everything is
// staged through host buffers with cudaMemcpyDefault so host and device
// sources both work.
//
// Fixes vs. previous revision:
//  - early return when there are no neighbors (previously &vec[0] was taken on
//    empty std::vectors — undefined behavior);
//  - the four bulk size/pointer copies are now followed by cudaCheckError(),
//    consistent with the per-map copies below.
inline void DistributedManagerBase<TConfig>::cacheMapsOneRing(const VecInt_t **b2l_maps, const VecInt_t *b2l_sizes, const VecInt_t **l2h_maps, const VecInt_t *l2h_sizes)
{
    int num_neighbors = this->neighbors.size();
    this->cached_B2L_maps.resize(num_neighbors);
    this->cached_L2H_maps.resize(num_neighbors);
    if (num_neighbors == 0)
    {
        return;
    }
    // Buffering in the case of GPU data; this shouldn't much affect performance.
    std::vector<VecInt_t *> b2l_buffer(num_neighbors), l2h_buffer(num_neighbors);
    std::vector<VecInt_t> b2l_sizes_buffer(num_neighbors), l2h_sizes_buffer(num_neighbors);
    cudaMemcpy(&(b2l_sizes_buffer[0]), b2l_sizes, sizeof(VecInt_t) * num_neighbors, cudaMemcpyDefault);
    cudaMemcpy(&(l2h_sizes_buffer[0]), l2h_sizes, sizeof(VecInt_t) * num_neighbors, cudaMemcpyDefault);
    cudaMemcpy(&(b2l_buffer[0]), b2l_maps, sizeof(VecInt_t *) * num_neighbors, cudaMemcpyDefault);
    cudaMemcpy(&(l2h_buffer[0]), l2h_maps, sizeof(VecInt_t *) * num_neighbors, cudaMemcpyDefault);
    cudaCheckError();
    // Cache all of the maps.
    for (int i = 0; i < num_neighbors; i++)
    {
        int size = b2l_sizes_buffer[i];
        this->cached_B2L_maps[i].resize(size);
        cudaMemcpy(&(this->cached_B2L_maps[i][0]), b2l_buffer[i], sizeof(VecInt_t) * size, cudaMemcpyDefault);
        cudaCheckError();
        size = l2h_sizes_buffer[i];
        this->cached_L2H_maps[i].resize(size);
        cudaMemcpy(&(this->cached_L2H_maps[i][0]), l2h_buffer[i], sizeof(VecInt_t) * size, cudaMemcpyDefault);
        cudaCheckError();
    }
}
template <class TConfig>
// Rebind this->A to in_A and read the fine-level consolidation switches from
// the resources configuration, recording them as booleans on this manager.
void DistributedManagerBase<TConfig>::setAConsolidationFlags( Matrix<TConfig> &in_A)
{
    this->A = &in_A;
    AMG_Config *cfg = this->A->getResources()->getResourcesConfig();
    std::string scope;
    int consolidate_flag = 0;
    int cuda_ipc_flag = 0;
    cfg->getParameter<int>("fine_level_consolidation", consolidate_flag, "default", scope);
    cfg->getParameter<int>("use_cuda_ipc_consolidation", cuda_ipc_flag, "default", scope);
    this->m_is_fine_level_consolidated = (consolidate_flag != 0);
    this->m_use_cuda_ipc_consolidation = (cuda_ipc_flag != 0);
}
template <class TConfig>
// Upload one partition's CSR matrix (row_ptrs/col_indices/data, optional
// external diag), dispatching on whether fine-level consolidation was
// requested in the resources config.
void DistributedManagerBase<TConfig>::uploadMatrix(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A)
{
    this->setAConsolidationFlags(in_A);
    if (!this->m_is_fine_level_consolidated)
    {
        // Plain path: copy everything, then rebuild maps and reorder.
        this->A->manager->initializeUploadReorderAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A));
    }
    else
    {
        // Consolidation path: merge several ranks' matrices onto root GPUs.
        this->A->manager->consolidateAndUploadAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A));
    }
}
template <class TConfig>
// Ensure the pinned (page-locked) host staging buffer can hold at least `size`
// bytes. Grows with ~10% slack to limit reallocation churn; never shrinks
// unless a grow is required.
// Fix vs. previous revision: cudaFreeHost/cudaMallocHost results are now
// checked (cudaCheckError), consistent with the rest of the file — previously
// an allocation failure left m_pinned_buffer NULL and failed later, far from
// the cause.
void DistributedManagerBase<TConfig>::checkPinnedBuffer(size_t size)
{
    if ((m_pinned_buffer_size < size) && (m_pinned_buffer != NULL))
    {
        cudaFreeHost(m_pinned_buffer);
        cudaCheckError();
        m_pinned_buffer = NULL;
        m_pinned_buffer_size = 0;
    }
    if (m_pinned_buffer == NULL)
    {
        m_pinned_buffer_size = (size_t)(size * 1.1);
        cudaMallocHost(&m_pinned_buffer, m_pinned_buffer_size);
        cudaCheckError();
    }
}
template <class TConfig>
// Destructor: releases the pinned staging buffer, drops communicator
// references, destroys the interior/boundary streams, and deletes the halo
// row/btl structures when no other manager holds a reference to them
// (ref count of zero means this manager is the sole owner).
DistributedManagerBase<TConfig>::~DistributedManagerBase()
{
if (m_pinned_buffer != NULL)
{
cudaFreeHost(m_pinned_buffer);
}
destroyComms();
// from childrens:
cudaStreamDestroy(this->m_int_stream);
cudaStreamDestroy(this->m_bdy_stream);
if (!this->halo_rows_ref_count && this->halo_rows != NULL)
{
delete this->halo_rows;
this->halo_rows = NULL;
}
if (!this->halo_btl_ref_count && this->halo_btl != NULL)
{
delete this->halo_btl;
this->halo_btl = NULL;
}
}
// if pointer is host pointer - returns data. If it is device pointer - copies it to the m_pinned_buffer and returns pointer to m_pinned_buffer
template <class TConfig>
// Returns a host-readable pointer for `ptr` of `size` bytes.
// *allocated is set to 1 only when the data was staged into m_pinned_buffer
// (plain device memory), telling the caller the result is a temporary copy.
void *DistributedManagerBase<TConfig>::getHostPointerForData(void *ptr, size_t size, int *allocated)
{
cudaError_t rc;
cudaPointerAttributes att;
void *ptr_h;
cudaCheckError();
/* WARNING: We may accept the following types of allocation for ptr:
1. malloc [host memory]
2. cudaMalloc [device memory]
3. malloc + cudaHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory]
4. cudaHostAlloc [pinned host memory from the beginning]
The correct way to cover these cases is the following:
cudaPointerAttributes att;
cudaError_t st = cudaPointerGetAttributes(&att, ptr);
if (st == cudaSuccess) {
//you are in case 2, 3 or 4.
}
else{
//you are in case 1.
}
The following pattern of checks should be implemented
cudaPointerAttributes att;
cudaError_t st = cudaPointerGetAttributes(&att, ptr);
if (st == cudaSuccess) {
//you are in case 2 or 4.
}
else{
st = cudaHostGetDevicePointer(ptr_on_device, ptr, 0);
if (st == cudaSuccess){
//you are in case 3.
}
else{
//you are in case 1.
}
}
The above pattern will be used whenever we need to process input data.
Obs.: parameter size is in bytes and
parameter allocated indicates whether memory was allocated
and needs to be release later on. */
/*
// original implementation
cudaPointerGetAttributes(&att, ptr);
if (att.hostPointer == NULL)
{
checkPinnedBuffer(size);
cudaMemcpy(m_pinned_buffer, ptr, size, cudaMemcpyDefault);
return m_pinned_buffer;
}
else {
return ptr;
}
*/
*allocated = 0;
// get pointer to values on the device
rc = cudaPointerGetAttributes(&att, ptr);
if (rc == cudaSuccess)
{
//you are in case 2 or 4 from the above comment.
// NOTE(review): relies on att.hostPointer being NULL for plain device
// allocations — confirm against the CUDA version in use, as the attribute
// semantics changed in newer runtimes.
if (att.hostPointer == NULL)
{
//you are in case 2: stage the device data into the pinned host buffer.
checkPinnedBuffer(size);
rc = cudaMemcpy(m_pinned_buffer, ptr, size, cudaMemcpyDefault);
if (rc != cudaSuccess)
{
FatalError("Could not copy into the temporary (host) storage. Try pinning the memory to avoid the cudaMemcpy.", AMGX_ERR_BAD_PARAMETERS);
}
ptr_h = m_pinned_buffer;
*allocated = 1;
}
else
{
//you are in case 4: already host-accessible, return as-is.
ptr_h = ptr;
}
}
else
{
//you are in case 1 or 3 from the above comment: plain/registered host memory.
ptr_h = ptr;
}
cudaGetLastError(); //to reset last error left by the failed attribute query
/* check for null pointers */
if (ptr_h == NULL)
{
FatalError("Result of (host) allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
}
return ptr_h;
}
// if pointer is host pointer - returns data. If it is device pointer - copies it to the m_pinned_buffer and returns pointer to m_pinned_buffer
template <class TConfig>
// Const overload of getHostPointerForData; same case analysis as the non-const
// version. *allocated is set to 1 only when data was staged into
// m_pinned_buffer.
// NOTE(review): unlike the non-const overload, the pass-through paths below
// return without a NULL check on the result — verify callers tolerate that.
const void *DistributedManagerBase<TConfig>::getHostPointerForData(const void *ptr, size_t size, int *allocated)
{
cudaError_t rc;
cudaPointerAttributes att;
void *ptr_h;
cudaCheckError();
/* WARNING: We may accept the following types of allocation for ptr:
1. malloc [host memory]
2. cudaMalloc [device memory]
3. malloc + cudaHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory]
4. cudaHostAlloc [pinned host memory from the beginning]
The correct way to cover these cases is the following:
cudaPointerAttributes att;
cudaError_t st = cudaPointerGetAttributes(&att, ptr);
if (st == cudaSuccess) {
//you are in case 2, 3 or 4.
}
else{
//you are in case 1.
}
The following pattern of checks should be implemented
cudaPointerAttributes att;
cudaError_t st = cudaPointerGetAttributes(&att, ptr);
if (st == cudaSuccess) {
//you are in case 2 or 4.
}
else{
st = cudaHostGetDevicePointer(ptr_on_device, ptr, 0);
if (st == cudaSuccess){
//you are in case 3.
}
else{
//you are in case 1.
}
}
The above pattern will be used whenever we need to process input data.
Obs.: parameter size is in bytes and
parameter allocated indicates whether memory was allocated
and needs to be release later on. */
*allocated = 0;
// get pointer to values on the device
rc = cudaPointerGetAttributes(&att, ptr);
if (rc == cudaSuccess)
{
//you are in case 2 or 4 from the above comment.
if (att.hostPointer == NULL)
{
//you are in case 2: stage device data into the pinned host buffer.
checkPinnedBuffer(size);
rc = cudaMemcpy(m_pinned_buffer, ptr, size, cudaMemcpyDefault);
if (rc != cudaSuccess)
{
FatalError("Could not copy into the temporary (host) storage. Try pinning the memory to avoid the cudaMemcpy.", AMGX_ERR_BAD_PARAMETERS);
}
ptr_h = m_pinned_buffer;
*allocated = 1;
cudaGetLastError(); //to reset last error
return ptr_h;
}
else
{
//you are in case 4: already host-accessible.
cudaGetLastError(); //to reset last error
return ptr;
}
}
else
{
cudaGetLastError(); //to reset last error left by the failed attribute query
//you are in case 1 or 3 from the above comment: host memory, return as-is.
return ptr;
}
}
template <class TConfig>
// Returns a device-accessible pointer for `ptr` of `size` bytes.
// *allocated is set to 1 only when a temporary device buffer was cudaMalloc'ed
// and filled with a copy (plain pageable host memory) — the caller must free
// it later.
void *DistributedManagerBase<TConfig>::getDevicePointerForData(void *ptr, size_t size, int *allocated)
{
cudaError_t rc;
cudaPointerAttributes att;
void *ptr_d;
cudaCheckError();
/* WARNING: We may accept the following types of allocation for ptr:
1. malloc [host memory]
2. cudaMalloc [device memory]
3. malloc + cudaHostRegister [AMGX_pin_memory/AMGX_unpin_memory host memory]
4. cudaHostAlloc [pinned host memory from the beginning]
The correct way to cover these cases is the following:
cudaPointerAttributes att;
cudaError_t st = cudaPointerGetAttributes(&att, ptr);
if (st == cudaSuccess) {
//you are in case 2, 3 or 4.
}
else{
//you are in case 1.
}
The following pattern of checks should be implemented
cudaPointerAttributes att;
cudaError_t st = cudaPointerGetAttributes(&att, ptr);
if (st == cudaSuccess) {
//you are in case 2 or 4.
}
else{
st = cudaHostGetDevicePointer(ptr_on_device, ptr, 0);
if (st == cudaSuccess){
//you are in case 3.
}
else{
//you are in case 1.
}
}
The above pattern will be used whenever we need to process input data.
Obs.: parameter size is in bytes and
parameter allocated indicates whether memory was allocated
and needs to be release later on. */
*allocated = 0;
// get pointer to values on the device
rc = cudaPointerGetAttributes(&att, ptr);
if (rc == cudaSuccess)
{
//you are in case 2 or 4 from the above comment: the runtime already knows a
//device alias for this pointer.
ptr_d = (void *)att.devicePointer;
}
else
{
//you are in case 1 or 3 from the above comment
rc = cudaHostGetDevicePointer(&ptr_d, ptr, 0);
if (rc != cudaSuccess)
{
//you are in case 1: pageable host memory — allocate and fill a temporary
//device copy, flagged via *allocated for the caller to release.
rc = cudaMalloc(&ptr_d, size);
if (rc != cudaSuccess)
{
FatalError("Could not allocate required temporary storage. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
}
rc = cudaMemcpy(ptr_d, ptr, size, cudaMemcpyDefault);
if (rc != cudaSuccess)
{
FatalError("Could not copy into the temporary storage. Try pinning the memory to avoid the cudaMemcpy.", AMGX_ERR_BAD_PARAMETERS);
}
*allocated = 1;
}
}
/* check for null pointers */
if (ptr_d == NULL)
{
FatalError("Result of allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
}
cudaGetLastError(); //to reset last error left by any failed query above
return ptr_d;
}
template <class TConfig>
// Const overload: returns a device-accessible pointer for `ptr` of `size`
// bytes. The accepted allocation kinds are:
//   1. malloc                      [pageable host memory]
//   2. cudaMalloc                  [device memory]
//   3. malloc + cudaHostRegister   [registered host memory]
//   4. cudaHostAlloc               [pinned host memory]
// Detection: cudaPointerGetAttributes succeeds for 2/4 (return the runtime's
// device alias); otherwise cudaHostGetDevicePointer succeeds for 3 (return its
// alias); otherwise case 1 — allocate a temporary device buffer, copy into it,
// and set *allocated = 1 so the caller releases it later.
//
// BUG FIX vs. previous revision: in case 3 (cudaHostGetDevicePointer
// succeeded) control fell through past the allocation branch and the function
// returned NULL instead of the device alias. It now returns ptr_d, matching
// the non-const overload.
const void *DistributedManagerBase<TConfig>::getDevicePointerForData(const void *ptr, size_t size, int *allocated)
{
    cudaError_t rc;
    cudaPointerAttributes att;
    void *ptr_d;
    cudaCheckError();
    *allocated = 0;
    // get pointer to values on the device
    rc = cudaPointerGetAttributes(&att, ptr);
    if (rc == cudaSuccess)
    {
        // Case 2 or 4: the runtime already knows a device alias.
        cudaGetLastError(); //to reset last error
        return (const void *)att.devicePointer;
    }
    // Case 1 or 3.
    rc = cudaHostGetDevicePointer(&ptr_d, (void *)ptr, 0);
    if (rc != cudaSuccess)
    {
        // Case 1: pageable host memory — make a temporary device copy.
        rc = cudaMalloc(&ptr_d, size);
        if (rc != cudaSuccess)
        {
            FatalError("Could not allocate required temporary storage. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
        }
        rc = cudaMemcpy(ptr_d, ptr, size, cudaMemcpyDefault);
        if (rc != cudaSuccess)
        {
            FatalError("Could not copy into the temporary storage. Try pinning the memory to avoid the cudaMemcpy.", AMGX_ERR_BAD_PARAMETERS);
        }
        *allocated = 1;
    }
    // Reached with ptr_d set either by cudaHostGetDevicePointer (case 3) or by
    // the cudaMalloc branch above (case 1).
    /* check for null pointers */
    if (ptr_d == NULL)
    {
        FatalError("Result of allocation of required temporary storage is NULL. Try pinning the memory to reduce storage requirements.", AMGX_ERR_BAD_PARAMETERS);
    }
    cudaGetLastError(); //to reset last error
    return (const void *)ptr_d;
}
template <class TConfig>
// Resize A to an n x n matrix with nnz blocks of block_dimx x block_dimy and
// upload the entire CSR structure and values in one shot (cudaMemcpyDefault
// accepts host or device source pointers). If `diag` is non-NULL it supplies
// an external diagonal stored at A's diagonal offset; otherwise the diagonal
// is computed from the uploaded values.
//
// Fix vs. previous revision: byte counts are computed in size_t from the
// start — the old `(nnz * block_dimx * block_dimy) * sizeof(...)` multiplied
// three ints first, which can overflow for large block matrices before the
// promotion to size_t.
void initializeMatrixCopyAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> *A)
{
    typedef typename TConfig::MatPrec mat_value_type;
    A->resize( n, n, nnz, block_dimx, block_dimy );
    const size_t block_vals = (size_t)block_dimx * (size_t)block_dimy;
    // Upload the entire matrix
    cudaMemcpy( A->row_offsets.raw(), row_ptrs, ((size_t)n + 1) * sizeof(int), cudaMemcpyDefault );
    cudaCheckError();
    cudaMemcpy( A->col_indices.raw(), col_indices, (size_t)nnz * sizeof(int), cudaMemcpyDefault );
    cudaCheckError();
    cudaMemcpy( A->values.raw(), (const mat_value_type *)data, (size_t)nnz * block_vals * sizeof(mat_value_type), cudaMemcpyDefault );
    cudaCheckError();
    if (diag)
    {
        // External diagonal supplied: place it at the matrix's diagonal offset.
        cudaMemcpy( A->values.raw() + A->diagOffset()*A->get_block_size(), (const mat_value_type *)diag, (size_t)n * block_vals * sizeof(mat_value_type), cudaMemcpyDefault );
    }
    else
    {
        A->computeDiagonal();
    }
    cudaCheckError();
}
template <class TConfig>
// Rebuild the distributed bookkeeping after a fresh matrix upload:
// restores the cached B2L/L2H maps, derives multi-ring maps and halo row
// matrices via a DistributedArranger, initializes the manager, and reorders
// the matrix into the renumbered/consolidated layout.
void DistributedManagerBase<TConfig>::updateMapsReorder()
{
int my_id = this->getComms()->get_global_id();
DistributedComms<TConfig> *comms_tmp = this->getComms();
DistributedComms<TConfig> **comms_ = &comms_tmp;
// Copy B2L_maps in their final place
int num_neighbors = this->neighbors.size();
B2L_maps.resize(num_neighbors);
L2H_maps.resize(num_neighbors);
for (int i = 0; i < num_neighbors; i++)
{
B2L_maps[i] = this->cached_B2L_maps[i];
L2H_maps[i] = this->cached_L2H_maps[i];
}
//Create a DistributedArranger object to map further halo rings and to construct halo row matrices and exchange them (if halo_coloring != LAST)
DistributedArranger<TConfig> *prep = new DistributedArranger<TConfig>;
prep->create_B2L_from_maps( (*(this->A)), my_id, this->num_halo_rings(), neighbors,
B2L_maps, L2H_maps, B2L_rings, comms_, &halo_rows, &halo_btl);
DistributedManagerBaseInit(my_id, 0, this->A->get_num_rows(), *(this->A), comms_, NULL, NULL);
//Use the exchanged halo row matrices and the boundary/halo index lists to renumber and consolidate the matrix
this->reorder_matrix();
prep->initialize_B2L_maps_offsets(*(this->A), this->num_halo_rings());
delete prep;
}
template <class TConfig>
// Non-consolidated upload path: bind in_A as the managed matrix, copy the full
// CSR data onto it, then rebuild the halo maps and reorder for distributed use.
void DistributedManagerBase<TConfig>::initializeUploadReorderAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A)
{
    this->A = &in_A;
    initializeMatrixCopyAll<TConfig>(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, &in_A);
    this->updateMapsReorder();
}
template <class TConfig>
// Drop this manager's references to its communicators, deleting each one when
// its reference count reaches zero (decr_ref_count() returns true in that case).
void DistributedManagerBase<TConfig>::destroyComms()
{
    if (this->_comms != NULL && this->_comms->decr_ref_count())
    {
        delete (this->_comms);
        this->_comms = NULL;
    }
    if (this->m_fine_level_comms != NULL && this->m_fine_level_comms->decr_ref_count())
    {
        delete (this->m_fine_level_comms);
        this->m_fine_level_comms = NULL;
    }
}
template <class TConfig>
// Create the communicator from the resources object, then record this
// partition's global id and the total partition count on the manager.
void DistributedManagerBase<TConfig>::initComms(Resources *rsrc)
{
    this->createComms(rsrc);
    this->set_global_id(this->getComms()->get_global_id());
    this->set_num_partitions(this->getComms()->get_num_partitions());
}
template <class TConfig>
// (Re)create the distributed communicator selected by the "communicator"
// config option: "MPI_DIRECT" -> CUDA-aware MPI, "MPI" -> host-buffered MPI;
// anything else is fatal. Any previously-owned communicator is released first.
// Compiled to a no-op without MPI support.
//
// Fix vs. previous revision: removed the unused local `rank` and the dead
// MPI_Comm_rank(MPI_COMM_WORLD, ...) query whose result was never read.
void DistributedManagerBase<TConfig>::createComms(Resources *rsrc)
{
    // create communicator
#ifdef AMGX_WITH_MPI
    destroyComms();
    if (rsrc == NULL)
    {
        FatalError("Resources should not be NULL", AMGX_ERR_INTERNAL);
    }
    MPI_Comm *mpi_comm = rsrc->getMpiComm();
    AMG_Config *cfg = rsrc->getResourcesConfig();
    std::string comm_value, comm_scope;
    cfg->getParameter<std::string>("communicator", comm_value, "default", comm_scope);
    if (comm_value == "MPI_DIRECT")
    {
        _comms = new CommsMPIDirect<TConfig>(*cfg, comm_scope, mpi_comm);
        std::string comm_log("Using CUDA-Aware MPI (GPU Direct) communicator...\n");
        amgx_distributed_output(comm_log.c_str(), comm_log.length());
    }
    else if (comm_value == "MPI")
    {
        _comms = new CommsMPIHostBufferStream<TConfig>(*cfg, comm_scope, mpi_comm);
        std::string comm_log("Using Normal MPI (Hostbuffer) communicator...\n");
        amgx_distributed_output(comm_log.c_str(), comm_log.length());
    }
    else
    {
        FatalError("Bad communicator value", AMGX_ERR_BAD_PARAMETERS);
    }
#endif
}
template <class TConfig>
// Export the B2L and L2H maps as plain malloc'ed host arrays.
// On return, *b2l_maps_e / *l2h_maps_e point to arrays of num_neighbors()
// pointers and *b2l_maps_sizes_e / *l2h_maps_sizes_e hold the per-neighbor
// lengths. The caller owns every allocation.
//
// Fix vs. previous revision: when a neighbor's L2H map is empty,
// (*l2h_maps_e)[i] was left uninitialized, making it unsafe for the caller to
// free() every entry; it is now set to NULL.
void DistributedManagerBase<TConfig>::malloc_export_maps(VecInt_t ***b2l_maps_e, VecInt_t **b2l_maps_sizes_e, VecInt_t ***l2h_maps_e, VecInt_t **l2h_maps_sizes_e)
{
    *b2l_maps_e = (VecInt_t **) malloc(sizeof(VecInt_t *)*this->num_neighbors());
    *l2h_maps_e = (VecInt_t **) malloc(sizeof(VecInt_t *)*this->num_neighbors());
    *b2l_maps_sizes_e = (VecInt_t *) malloc(sizeof(VecInt_t) * (this->num_neighbors()));
    *l2h_maps_sizes_e = (VecInt_t *) malloc(sizeof(VecInt_t) * (this->num_neighbors()));
    for (int i = 0; i < this->num_neighbors(); i++)
    {
        (*b2l_maps_sizes_e)[i] = B2L_maps[i].size();
        (*l2h_maps_sizes_e)[i] = L2H_maps[i].size();
        (*b2l_maps_e)[i] = (VecInt_t *) malloc(sizeof(VecInt_t) * ( (*b2l_maps_sizes_e)[i]) );
        if (L2H_maps[i].size() != 0)
        {
            (*l2h_maps_e)[i] = (VecInt_t *) malloc(sizeof(VecInt_t) * ( (*l2h_maps_sizes_e)[i]) );
            thrust::copy(L2H_maps[i].begin(), L2H_maps[i].end(), (*l2h_maps_e)[i]);
        }
        else
        {
            (*l2h_maps_e)[i] = NULL;
        }
        thrust::copy(B2L_maps[i].begin(), B2L_maps[i].end(), (*b2l_maps_e)[i]);
    }
    cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::createRenumbering(IVector &renumbering)
{
int num_neighbors = this->neighbors.size();
// still renumber if the number of neighbors = 0, to support non-symmetric matrices
// if (num_neighbors == 0) return;
/*
EXAMPLE
Example matrix, partition 1 arrives with state:
A.row_offsets = [0 4 11 15 20]
A.col_indices = [4 0 1 2
4 5 0 1 2 3 7
0 1 2 3
1 2 3 6 7]
num_neighbors=2; neighbors = [0 2]
B2L_rings[[0 2 4][0 2 4]] B2L_maps[[0 1| 2 3][1 3| 0 2]]
L2H_maps (and halo_lists) [[4 5][6 7]]
*/
int size = 0;
if (this->L2H_maps.size())
{
size = thrust_wrapper::reduce(this->A->col_indices.begin(), this->A->col_indices.end(), int(0), thrust::maximum<int>()) + 1; //Sufficient to do reduction on lth maps
cudaCheckError();
}
else
{
size = this->A->get_num_rows();
}
int rings = (this->B2L_rings.size() > 0) ? this->B2L_rings[0].size() - 1 : 0;
//initial size to size+1 so we have the total size after a scan
renumbering.resize(size + 1);
int global_size = size;
//
// Step 1 - in the main matrix, separate interior and boundary nodes (1/0 in flagArray), renumber interior ones with an exclusive scan
//
IVector flagArray(size + 1);
thrust::fill(flagArray.begin(), flagArray.begin() + size + 1, 1);
cudaCheckError();
//sets 1 for interior nodes, 0 for boundary node
for (int i = 0; i < num_neighbors; i++ )
{
int size = this->B2L_rings[i][1];
int num_blocks = min(4096, (size + 127) / 128);
if (size > 0)
{
remove_boundary_kernel <<< num_blocks, 128>>>(flagArray.raw(), this->B2L_maps[i].raw(), size);
}
//If there are any L2H maps
if (this->L2H_maps.size() && this->L2H_maps[i].size())
{
int size = this->L2H_maps[i].size();
int num_blocks = min(4096, (size + 127) / 128);
remove_boundary_kernel <<< num_blocks, 128>>>(flagArray.raw(), this->L2H_maps[i].raw(), size);
}
cudaCheckError();
}
//gets the renumbering of interior nodes
thrust_wrapper::exclusive_scan(flagArray.begin(), flagArray.begin() + size + 1, renumbering.begin());
cudaCheckError();
/*
EXAMPLE
After removing 1-ring boundary nodes and halo nodes from flagArray: [0 0 1 0 0 0 0 0]
After exclusive scan, which gives renumbering for interior nodes (only node #2)
renumbering: [0 0 0 1 1 1 1 1]
*/
//
// Step 2 - Renumber nodes that are in the boundary, stepping through each B2L map, and renumbering ones that have not been renumbered yet
//
//what is the biggest B2L size
INDEX_TYPE max_size = 0;
for (int i = 0; i < num_neighbors; i++)
{
max_size = max_size > this->B2L_rings[i][1] ? max_size : this->B2L_rings[i][1];
if (this->L2H_maps.size())
{
max_size = max_size > this->L2H_maps[i].size() ? max_size : this->L2H_maps[i].size();
}
}
//allocate work vectors (should be pretty small) that are used to renumber boundary nodes
IVector boundary_renum_flags(max_size);
IVector boundary_renum(max_size);
//the number of renumbered nodes so far
int max_element = renumbering[size];
this->_num_interior_nodes = max_element;
this->_num_boundary_nodes = this->A->get_num_rows() - max_element;
renumbering.resize(size);
/*
EXAMPLE
size = 8
max_size = 2, max_element = 1, num_interior_nodes=1, num_boundary_nodes = 4-1 = 3
*/
for (int i = 0; i < num_neighbors; i++)
{
//find nodes that are part of the current boundary and they haven't been renumbered yet
thrust::fill(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, 0);
int size = this->B2L_rings[i][1];
int num_blocks = min(4096, (size + 191) / 192);
if (size > 0)
get_unassigned_kernel <<< num_blocks, 192>>>(flagArray.raw(),
this->B2L_maps[i].raw(),
boundary_renum_flags.raw(), size, global_size /*,rank*/);
//calculate the local renumbering (within this boundary region) of these nodes
thrust_wrapper::exclusive_scan(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, boundary_renum.begin());
//apply renumbering to the big numbering table
if (size > 0)
set_unassigned_kernel <<< num_blocks, 192>>>(boundary_renum_flags.raw(),
boundary_renum.raw(),
this->B2L_maps[i].raw(),
renumbering.raw(),
size, max_element, global_size /*,rank*/);
//update the number of renumbered nodes
max_element += boundary_renum[max_size - 1] + boundary_renum_flags[max_size - 1];
/*
EXAMPLE
for neighbor 0 (ID 0)
boundary_renum_flags = [0 0], size = 2, flagArray [0 0 1 0 0 0 0 0]
get_unassigned_kernel's output: boundary_renum_flags = [1 1] flagArray [1 1 1 0 0 0 0 0]
after exclusive scan: boundary_renum [0 1]
set_unassigned_kernel updates these arrays and renumbers B2L map:
renumbering = [1 2 0 1 1 1 1 1] B2L_maps[0] = [1 2| 2 3] (note that after element 3 in renumbering and after element 2 we have invalid/not yet updated values)
max_element = 3
for neighbor 1 (ID 2)
get_unassigned_kernels's output: boundary_renum_flags [0 1] flagArray [1 1 1 1 0 0 0 0]
after exclusive scan boundary_renum [0 0]
set_unassigned_kernel renumbering [1 2 0 3 1 1 1 1] B2L_maps[1] = [2 3| 0 2]
max_element = 4
*/
}
cudaCheckError();
//Get renumbering for halo indices
if (this->L2H_maps.size())
{
//TODO: simplify this, we don't need to check whether it has already been renumbered, there is no overlap between halos
for (int i = 0; i < num_neighbors; i++)
{
//find nodes that are part of the current boundary and they haven't been renumbered yet
thrust::fill(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, 0);
int size = this->L2H_maps[i].size();
int num_blocks = min(4096, (size + 191) / 192);
if (size > 0)
get_unassigned_kernel <<< num_blocks, 192>>>(flagArray.raw(),
this->L2H_maps[i].raw(),
boundary_renum_flags.raw(), size, global_size /*,rank*/);
//calculate the local renumbering (within this boundary region) of these nodes
thrust_wrapper::exclusive_scan(boundary_renum_flags.begin(), boundary_renum_flags.begin() + max_size, boundary_renum.begin());
//apply renumbering to the big numbering table
if (size > 0)
set_unassigned_kernel <<< num_blocks, 192>>>(boundary_renum_flags.raw(),
boundary_renum.raw(),
this->L2H_maps[i].raw(),
renumbering.raw(),
size, max_element, global_size /*,rank*/);
//update the number of renumbered nodes
max_element += boundary_renum[max_size - 1] + boundary_renum_flags[max_size - 1];
/*
EXAMPLE
for neighbor 0 (ID 0)
boundary_renum_flags = [0 0], size = 2, flagArray [1 1 1 1 0 0 0 0]
get_unassigned_kernel's output: boundary_renum_flags = [1 1] flagArray [1 1 1 1 1 1 0 0]
after exclusive scan: boundary_renum [0 1]
set_unassigned_kernel updates these arrays and renumbers B2L map:
renumbering = [1 2 0 3 4 5 1 1] L2H_maps[0] = [4 5]
max_element = 6
for neighbor 1 (ID 2)
get_unassigned_kernels's output: boundary_renum_flags [1 1] flagArray [1 1 1 1 1 1 1 1]
after exclusive scan boundary_renum [0 1]
set_unassigned_kernel renumbering = [1 2 0 3 4 5 6 7] L2H_maps[1] = [6 7]
max_element = 8
*/
}
cudaCheckError();
}
//apply renumbering to further halo rings too
if (rings > 1)
{
for (int i = 0; i < num_neighbors; i++)
{
int size = this->B2L_rings[i][this->B2L_rings[i].size() - 1] - this->B2L_rings[i][1];
int num_blocks = min(4096, (size + 127) / 128);
renumber_b2l_maps <<< num_blocks, 128>>>(this->B2L_maps[i].raw() + this->B2L_rings[i][1], renumbering.raw(), size, global_size /*, rank*/);
}
cudaCheckError();
}
/*
EXAMPLE
renumbers further boundary rings as listed in B2L_maps, since they have not been replaced yet with their renumbered values
B2L_maps [[1 2| 0 3][2 3| 1 0]]
*/
}
/* Reorders the owned part of the matrix A into [interior | boundary] row order
   (classical path, no external diagonal), then pads the matrix with identity
   rows at the bottom so it stays square after halo columns are appended.
   Side effects: fills this->renumbering / inverse_renumbering / halo_offsets,
   swaps new CSR arrays into this->A and resizes A to total_rows x total_rows. */
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix_owned()
{
int num_neighbors = this->neighbors.size();
int size = this->A->get_num_rows();
int num_blocks = min(4096, (size + 511) / 512);
//number of halo rings = number of entries in a B2L ring descriptor minus one
int rings = (this->B2L_rings.size() > 0) ? this->B2L_rings[0].size() - 1 : 0;
this->set_num_halo_rings(rings);
int diag = this->A->hasProps(DIAG);
if (diag)
{
FatalError("External diag not supported in classical path", AMGX_ERR_NOT_IMPLEMENTED);
}
//
// Step 1 & 2 - create renumbering
//
this->createRenumbering(this->renumbering);
//now we have the full renumbering table in renum, calculate the inverse
this->inverse_renumbering.resize(this->renumbering.size());
if (this->renumbering.size() > 1)
{
calc_inverse_renumbering <<< min(4096, ((int)this->renumbering.size() + 511) / 512), 512 >>> (this->renumbering.raw(), this->inverse_renumbering.raw(), this->renumbering.size());
cudaCheckError();
}
//
// Step 4 - calculate number/offset of nodes in the halos from the neighbors, ring by ring
//
//halo_offsets[i] is the first (renumbered) row index of neighbor i's halo block;
//halo rows are appended after the "size" owned rows
this->halo_offsets.resize(num_neighbors + 1);
this->halo_offsets[0] = size;
for (int i = 0; i < num_neighbors; i++)
{
this->halo_offsets[i + 1] = this->halo_offsets[i] + this->L2H_maps[i].size();
}
this->set_num_halo_rows(this->halo_offsets[this->halo_offsets.size() - 1] - size);
int nh = this->num_halo_rows();
int total_rows = size + nh;
cudaCheckError();
//
// Step 6 - renumber halo matrices and calculate row length (to eventually append to the big matrix)
//
int insert = 0;
//recalculate row_offsets (stored as per-row lengths first, scanned below)
IVector new_row_offsets(size + 1);
if (num_blocks > 0)
{
calc_rowlen_reorder <<< num_blocks, 512>>>(this->A->row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), size, insert);
cudaCheckError();
}
//map old column indices to their renumbered values in place (rows not shuffled yet)
thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.begin()),
thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.end()),
this->A->col_indices.begin());
cudaCheckError();
//row_offsets array created by exclusive scan of row sizes
thrust_wrapper::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + size + 1, new_row_offsets.begin());
cudaCheckError();
//
// Step 7 - consolidate column indices and values
//
int new_nnz = new_row_offsets[new_row_offsets.size() - 1];
typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA;
//+1 value slot keeps a trailing zero element (original storage convention)
VVector new_values((new_nnz + 1 )* this->A->get_block_size(), types::util<ValueTypeA>::get_zero());
IVector new_col_indices(new_nnz, 0);
//reorder based on row permutation
if (num_blocks > 0)
{
reorder_whole_matrix <32> <<< num_blocks, 512>>>(this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), this->A->get_block_size(), size, insert);
cudaCheckError();
}
//create and append halo rows size
//create an identity matrix in CSR format
int nnz = this->A->get_num_nz();
IVector identity_csr_rows(nh + 1);
IVector identity_csr_cols(nh);
VVector identity_csr_vals(nh, types::util<ValueTypeA>::get_one()); //needs to be changed to MVector, but this definition is messed up in the header file (should fix later)
thrust::sequence(identity_csr_rows.begin(), identity_csr_rows.end());
thrust::sequence(identity_csr_cols.begin(), identity_csr_cols.end());
/*for example, 2x2 identity_csr matrix is created:
identity_csr_rows = { 0, 1, 2 }
identity_csr_cols = { 0, 1 }
identity_csr_vals = { 1.0, 1.0 } */
//shift the identity matrix so it can be appended after the existing entries:
//row offsets start at nnz (end of the owned nonzeros), columns start at size
thrust::transform(identity_csr_rows.begin(), identity_csr_rows.end(), thrust::constant_iterator<INDEX_TYPE>(nnz), identity_csr_rows.begin(), thrust::plus<INDEX_TYPE>());
thrust::transform(identity_csr_cols.begin(), identity_csr_cols.end(), thrust::constant_iterator<INDEX_TYPE>(size), identity_csr_cols.begin(), thrust::plus<INDEX_TYPE>());
/*continuing the 2x2 example, after the shifts:
identity_csr_rows = { nnz, nnz+1, nnz+2 }
identity_csr_cols = { size, size+1 }
identity_csr_vals = { 1.0, 1.0 } */
/* WARNING: you must be very careful with the view you are setting (cuurently the view coming here by default is ALL = FULL). If
- classical path is selected then the createOneRingHaloRows -> create_one_ring_halo_rows -> append_halo_rows
routine will be called. It will overwrite the halo rows setup here (and will use view OWNED, which will ignore the
halo rows setup here, to determine how the new halo rows should be placed).
- aggregation path is selected then the extra rows setup here will be used in the R*A*P product, where (in order to match
dimensions of R and P) it is assumed that (the local partition) matrix A is square, therefore it must be padded by identity
rows at the bottom to compensate for the "extra" columns that are outside of the main square part. The old routines for the
aggregation path do this padding at the end of the reorder_matrix routine below. */
//ViewType v = this->A->currentView();
//this->A->setView(ALL);
//Approach 1: use existing routine to append the identity matrix to the existing one
// (seems like too much overhead, also need identity matrix per neighbor)
//DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
//prep->append_halo_rows(this->A, identity_csr_rows, identity_csr_cols, identity_csr_vals);
//delete prep;
//Approach 2: custom for this routine
new_row_offsets.resize(total_rows + 1);
new_col_indices.resize(nnz + nh);
new_values.resize(nnz + nh + 1); //extra 1 element stores zero at the end (to follow the original design)
//new_values[nnz]=-1; //marker to track the last element
//append the shifted identity rows/cols after the owned part,
//moving the trailing zero sentinel value to the new end first
thrust::copy(identity_csr_rows.begin(), identity_csr_rows.end(), new_row_offsets.begin() + size );
thrust::copy(identity_csr_cols.begin(), identity_csr_cols.end(), new_col_indices.begin() + nnz);
thrust::copy(new_values.begin() + nnz, new_values.begin() + nnz + 1, new_values.begin() + nnz + nh);
thrust::copy(identity_csr_vals.begin(), identity_csr_vals.end(), new_values.begin() + nnz);
/* WARNING: see above. */
//install the new CSR arrays into A; A becomes square of dimension total_rows
this->A->set_num_cols(total_rows);
this->A->set_num_rows(total_rows);
this->A->col_indices.swap(new_col_indices);
new_row_offsets.resize(total_rows + 1);
this->A->row_offsets.swap(new_row_offsets);
//keep the pre-reorder row offsets around (old_row_offsets) for later use
new_row_offsets.swap(this->old_row_offsets);
this->A->values.swap(new_values);
this->A->m_seq_offsets.resize(total_rows + 1);
thrust::sequence(this->A->m_seq_offsets.begin(), this->A->m_seq_offsets.end());
cudaCheckError();
//TODO: only do this if AMG_Config matrix_halo_exchange!=2
this->A->delProps(COO);
if (!insert)
{
this->A->computeDiagonal();
}
this->set_initialized(this->A->row_offsets);
this->A->setView(OWNED);
}
/* Reorders the owned part of A into [interior | boundary] row order and appends
   the halo rows received from the neighbors (this->halo_rows / this->halo_btl)
   at the bottom, ring by ring. Column indices of halo rows are translated into
   the local numbering via a per-neighbor mapping table; unreachable halo columns
   (mapped to -1) are dropped. Consumes and deletes this->halo_rows/halo_btl. */
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix()
{
int num_neighbors = this->neighbors.size();
if (num_neighbors == 0) { return; }
int size = this->A->get_num_rows();
int num_blocks = min(4096, (size + 511) / 512);
int rings = this->B2L_rings[0].size() - 1;
this->set_num_halo_rings(rings);
int diag = this->A->hasProps(DIAG);
std::vector<Matrix<TConfig_d> > &halo_rows = *this->halo_rows;
std::vector<DistributedManager<TConfig_d> > &halo_btl = *this->halo_btl;
/*
EXAMPLE
The example matrix, on partition 1 arrives at this point with the following state:
num_rings=2
A.num_rows = 4; A.num_nz = 20
A.row_offsets = [0 4 11 15 20]
A.col_indices = [4 0 1 2
4 5 0 1 2 3 7
0 1 2 3
1 2 3 6 7]
num_neighbors=2; neighbors = [0 2]
B2L_rings[[0 2 4][0 2 4]] B2L_maps[[0 1| 2 3][1 3| 0 2]]
L2H_maps (and halo_lists) [[4 5][6 7]]
With the exchange halo rows:
halo_btl[0] (received from neighbor ID 0)
global_id = 0; base_index=0; index_range=6; B2L_rings[0] = [0 2 4]; B2L_maps[0] = [2 3| 0 1] L2H_maps = [4 5]
halo_rows[0].row_offsets = [0 5 13 17 21]
halo_rows[0].col_indices = [1 2 3 4 5
0 1 2 3 4 5 6 7
0 1 3 6
0 1 2 3]
halo_btl[1] (received from neighbor ID 2)
global_id = 2; base_index=0; index_range=8; B2L_rings[0] = [0 2 4]; B2L_maps[0] = [1 2| 0 3] L2H_maps = [6 7]
halo_rows[1].row_offsets = [0 4 11 16 20]
halo_rows[1].col_indices = [7 1 2 3
5 6 7 0 1 2 3
4 5 0 2 3
0 1 2 3]
*/
//
// Step 1 & 2 - create renumbering
//
this->createRenumbering(this->renumbering);
cudaCheckError();
/*
EXAMPLE
this->renumbering = [1 2 0 3 4 5 6 7]
B2L_maps = [[1 2| 0 3][2 3| 1 0]]
L2H_maps = [[4 5][6 7]]
*/
//
// Step 3 - given a full renumbering of owned nodes, calculate inverse renumbering
//
//now we have the full renumbering table in renum, calculate the inverse
this->inverse_renumbering.resize(this->renumbering.size());
calc_inverse_renumbering <<< min(4096, ((int)this->renumbering.size() + 511) / 512), 512 >>> (this->renumbering.raw(), this->inverse_renumbering.raw(), this->renumbering.size());
cudaCheckError();
/*
EXAMPLE
this->inverse_renumbering = [2 0 1 3 4 5 6 7]
*/
//
// Step 4 - calculate number/offset of nodes in the halos from the neighbors, ring by ring
//
//halo_offsets holds one entry per (ring, neighbor) pair: first the per-block
//counts, turned into absolute row offsets by the exclusive scan (seeded with size)
this->halo_offsets.resize(rings * num_neighbors + 1, 0);
for (int ring = 0; ring < rings; ring++)
{
for (int i = 0; i < num_neighbors; i++)
{
this->halo_offsets[ring * num_neighbors + i] = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
}
}
thrust::exclusive_scan(this->halo_offsets.begin(), this->halo_offsets.end(), this->halo_offsets.begin(), size);
cudaCheckError();
this->set_num_halo_rows(this->halo_offsets[this->halo_offsets.size() - 1] - size);
int total_rows = size + this->num_halo_rows();
if (total_rows < this->renumbering.size()) { FatalError("total rows < renumbering.size(), send/recv maps should cover all matrix halo columns", AMGX_ERR_NOT_IMPLEMENTED); }
if (total_rows > this->renumbering.size())
{
this->A->getResources()->warning("# owned nodes + # halo nodes > matrix columns: send/recv maps have some unreferences halo indices, they are not directly connected to our partition and therefore we won't compute them, please use 2-ring comms maps if you want to specify 2nd ring neighbors");
}
cudaCheckError();
/*
EXAMPLE
halo_offsets [2 2 2 2]
after exclusive scan: 4 + [0 2 4 6 8] = [4 6 8 10 12]
num_halo_rows = 8, total_rows = 12
*/
//
// Step 5 - create big mapping table of all halo indices we received (this may use a little too much memory)
//
//count number of fine rows of neighbors
thrust::host_vector<INDEX_TYPE> neighbor_rows(num_neighbors + 1);
int max_num_rows = 0;
for (int i = 0; i < num_neighbors; i++)
{
neighbor_rows[i] = halo_btl[i].index_range();
max_num_rows = max_num_rows > halo_rows[i].get_num_rows() ? max_num_rows : halo_rows[i].get_num_rows();
}
thrust::exclusive_scan(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows.begin());
cudaCheckError();
int total_rows_of_neighbors = neighbor_rows[num_neighbors];
/*
EXAMPLE
neigbor_rows = [0 6 14]
total_rows_of_neighbors = 14
*/
//halo_mapping: for every index a neighbor can refer to, the corresponding
//local (renumbered) index on this partition, or -1 if unreachable
IVector halo_mapping(total_rows_of_neighbors);
thrust::fill(halo_mapping.begin(), halo_mapping.end(), -1);
cudaCheckError();
//ring by ring, neighbor by neighbor assign sequentially increasing numbers for halo nodes
for (int ring = 0; ring < rings; ring++)
{
for (int i = 0; i < num_neighbors; i++)
{
int size = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
int num_blocks = min(4096, (size + 127) / 128);
//This renumbering has to result in the same renumbering that comes out of L2H renumbering
create_halo_mapping <<< num_blocks, 128>>>(halo_mapping.raw() + neighbor_rows[i],
halo_btl[i].B2L_maps[0].raw() + halo_btl[i].B2L_rings[0][ring],
halo_btl[i].base_index(), this->halo_offsets[ring * num_neighbors + i], size);
cudaCheckError();
/*
EXAMPLE
ring 0 neighbor 0 - halo_btl[0].B2L_maps[0] = [2 3| 0 1] halo_btl[0].L2H_maps = [4 5]
halo_mapping = [-1 -1 4 5 -1 -1 |-1 -1 -1 -1 -1 -1 -1 -1]
ring 0 neighbor 1 - halo_btl[1].B2L_maps[0] = [1 2| 0 3] halo_btl[1].L2H_maps = [6 7]
halo_mapping = [-1 -1 4 5 -1 -1 |-1 6 7 -1 -1 -1 -1 -1]
ring 1 neighbor 0 - halo_btl[0].B2L_maps[0] = [2 3| 0 1] halo_btl[0].L2H_maps = [4 5]
halo_mapping = [8 9 4 5 -1 -1 |-1 6 7 -1 -1 -1 -1 -1]
ring 1 neighbor 1 - halo_btl[1].B2L_maps[0] = [1 2| 0 3] halo_btl[1].L2H_maps = [6 7]
halo_mapping = [8 9 4 5 -1 -1 |10 6 7 11 -1 -1 -1 -1]
*/
}
}
cudaCheckError();
for (int i = 0; i < num_neighbors; i++)
{
int size = halo_btl[i].L2H_maps[0].size();
int num_blocks = min(4096, (size + 127) / 128);
//Map the column indices of the halo rows that point back to boundary nodes
apply_h2l2b_mapping <<< num_blocks, 128>>>(halo_mapping.raw() + neighbor_rows[i],
halo_btl[i].L2H_maps[0].raw(),
halo_btl[i].base_index(), this->B2L_maps[i].raw(), size);
cudaCheckError();
/*
EXAMPLE
neighbor 0 - mapping back to our own (boundary) indices
halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 -1 -1]
neighbor 1 - mapping back to our own (boundary) indices
halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3]
*/
}
cudaCheckError();
/*
EXAMPLE
neighbor_rows = [0 6 14]
halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3]
The first part (0-6) of halo_mapping gives a local index for all the indices that we want to know about in halo_btl[0]
The second part (7-14) gives local indices for halo_btl[1], that is both halo ring there, and the column indices representing vertices in this partition's boundary.
Note that it does not give indices (-1) for vertices 5 and 6 in neighbor 1 (ID 2), which are column indices connecting it to neighbor 0, hence the two halo regions are not connected
*/
//
// Step 6 - renumber halo matrices and calculate row length (to eventually append to the big matrix)
//
int insert = 0;
//insertDiagonals folds the external diagonal into the CSR structure
if (this->A->hasProps(DIAG) && insertDiagonals) { insert = 1; }
diag = diag && !insertDiagonals;
//recalculate row_offsets (per-row lengths first; scanned further below)
IVector new_row_offsets(size + this->num_halo_rows() + 1);
calc_rowlen_reorder <<< num_blocks, 512>>>(this->A->row_offsets.raw(), new_row_offsets.raw(), this->renumbering.raw(), size, insert);
cudaCheckError();
IVector neighbor_rows_d(num_neighbors + 1);
thrust::copy(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows_d.begin());
cudaCheckError();
/*
EXAMPLE
get row length according to renumbering
new_row_offsets = [4 4 7 5 0 0 0 0 0 0 0 0 0]
*/
//map column indices of my own matrix
/*map_col_indices<4><<<num_blocks, 512>>>(this->A->row_offsets.raw(),
this->A->col_indices.raw(),
this->renumbering.raw(),
this->halo_ranges.raw(),
halo_mapping.raw(),
neighbor_rows_d.raw(),
this->base_index(), num_neighbors, size);*/
thrust::copy(thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.begin()),
thrust::make_permutation_iterator(this->renumbering.begin(), this->A->col_indices.end()),
this->A->col_indices.begin());
cudaCheckError();
/*
EXAMPLE
use this->renumbering = [1 2 0 3 4 5 6 7]
to map old column indices to new column indices (i.e. according to interior - boundary - halo separation), but do not reshuffle them into their place yet
A.col_indices = [4 0 1 2
4 5 0 1 2 3 7
0 1 2 3
1 2 3 6 7]
becomes
A.col_indices = [4 1 2 0
4 5 1 2 0 3 7
1 2 0 3
2 0 3 6 7]
*/
cudaCheckError();
IVector temp_row_len(max_num_rows);
for (int i = 0; i < num_neighbors; i++)
{
//map column indices of halo matrices and count of nonzeros we will keep
int size = halo_rows[i].get_num_rows();
int num_blocks = min(4096, (size + 127) / 128);
map_col_indices_and_count_rowlen<4> <<< num_blocks, 128, 128 * sizeof(INDEX_TYPE)>>>(
halo_rows[i].row_offsets.raw(),
halo_rows[i].col_indices.raw(),
temp_row_len.raw(),
halo_mapping.raw() + neighbor_rows[i],
size, insert);
cudaCheckError();
//number of nonzeros per row copied into big row sizes array
for (int ring = 0; ring < rings; ring++)
{
thrust::copy(temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring], temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring + 1], new_row_offsets.begin() + this->halo_offsets[ring * num_neighbors + i]);
}
cudaCheckError();
/*
EXAMPLE
halo_mapping = [8 9 4 5 1 2 |10 6 7 11 -1 -1 2 3]
look at halo row matrices, and halo_mapping, count column indices that do not map to -1 and map them to their new, local index
halo_rows[0].col_indices = [1 2 3 4 5
0 1 2 3 4 5 6 7
0 1 3 6
0 1 2 3]
becomes
halo_rows[0].col_indices = [9 4 5 1 2
8 9 4 5 1 2 -1 -1
8 9 5 -1
8 9 4 5]
with temp_row_len = [5 6 3 4] copied into new_row_offsets: [4 4 7 5| 5 6| 0 0| 3 4| 0 0 0]
halo_rows[1].col_indices = [7 1 2 3
5 6 7 0 1 2 3
4 5 0 2 3
0 1 2 3]
becomes
halo_rows[1].col_indices = [3 6 7 11
-1 2 3 10 6 7 11
-1 -1 10 7 11
10 6 7 11]
with temp_row_len = [4 6 3 4] copied into new_row_offsets: [4 4 7 5| 5 6| 4 6| 3 4| 3 4 0]
*/
}
cudaCheckError();
//row_offsets array created by exclusive scan of row sizes
thrust_wrapper::exclusive_scan(new_row_offsets.begin(), new_row_offsets.begin() + size + this->num_halo_rows() + 1, new_row_offsets.begin());
cudaCheckError();
/*
EXAMPLE
Exclusive scan to get new_row_offsets array:
new_row_offsets = [0 4 8 15 20| 25 31| 35 41| 44 48| 51 55]
*/
//
// Step 7 - consolidate column indices and values
//
int new_nnz = new_row_offsets[new_row_offsets.size() - 1];
typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA;
//when diag is external, extra slots at the end hold the diagonal values
VVector new_values((new_nnz + 1 + diag * (total_rows - 1))* this->A->get_block_size(), types::util<ValueTypeA>::get_zero());
IVector new_col_indices(new_nnz, 0);
//reorder based on row permutation
reorder_whole_matrix <32> <<< num_blocks, 512>>>(this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), new_row_offsets.raw(), new_col_indices.raw(), new_values.raw(), this->renumbering.raw(), this->A->get_block_size(), size, insert);
cudaCheckError();
if (diag)
{
//reorder based on row permutation
reorder_vector_values <<< num_blocks, 512>>>(new_values.raw() + new_row_offsets[total_rows]*this->A->get_block_size(),
this->A->values.raw() + this->A->row_offsets[size]*this->A->get_block_size(),
this->renumbering.raw(),
this->A->get_block_size(), size);
cudaCheckError();
}
int cumulative_num_rows = size;
for (int i = 0; i < num_neighbors; i++)
{
for (int ring = 0; ring < rings; ring++)
{
int num_rows = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
int num_blocks = min(4096, (num_rows + 127) / 128);
//copy in nonzeros that we are keeping
//TODO: access pattern - should be implemented with warp-wide scans to decide which nonzeros we are keeping and where the rest is going
reorder_whole_halo_matrix <<< num_blocks, 128>>>(halo_rows[i].row_offsets.raw() + halo_btl[i].B2L_rings[0][ring],
halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(),
new_row_offsets.raw() + this->halo_offsets[ring * num_neighbors + i],
new_col_indices.raw(), new_values.raw(), NULL, this->A->get_block_size(), num_rows,
insert, this->halo_offsets[ring * num_neighbors + i], halo_btl[i].B2L_rings[0][ring], halo_btl[i].B2L_rings[0][rings]);
if (diag)
{
//external-diagonal values of the halo rows are appended after the owned diagonals
thrust::copy(halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring])*this->A->get_block_size(),
halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring + 1])*this->A->get_block_size(),
new_values.begin() + (new_row_offsets[total_rows] + cumulative_num_rows)*this->A->get_block_size());
cumulative_num_rows += num_rows;
}
}
}
cudaCheckError();
/*
EXAMPLE
copy everything in place, dropping -1 column indices in the halo and reordering the owned rows
new_row_offsets = [0 4 8 15 20| 25 31| 35 41| 44 48| 51 55]
new_col_indices = [1 2 0 3
4 1 2 0
4 5 1 2 0 3 7
2 0 3 6 7 -end of owned
9 4 5 1 2
8 9 4 5 1 2 - end of neighbor 0 ring 0
3 6 7 11
2 3 10 6 7 11 - end of neighbor 1 ring 0
8 9 5
8 9 4 5 - end of neighbor 0 ring 1
10 7 11
10 6 7 11] - end of neighbor 1 ring 1
*/
//install the new CSR arrays; note num_rows stays "size" (owned rows only),
//while num_cols covers owned + halo columns
this->A->set_num_cols(total_rows);
this->A->set_num_rows(size);
this->A->col_indices.swap(new_col_indices);
new_row_offsets.resize(total_rows + 1);
this->A->row_offsets.swap(new_row_offsets);
//keep the pre-reorder row offsets around (old_row_offsets) for later use
new_row_offsets.swap(this->old_row_offsets);
this->A->values.swap(new_values);
this->A->m_seq_offsets.resize(total_rows + 1);
thrust::sequence(this->A->m_seq_offsets.begin(), this->A->m_seq_offsets.end());
if (insert)
{
this->A->delProps(DIAG);
this->A->diag.resize(total_rows);
thrust::copy(this->A->row_offsets.begin(), this->A->row_offsets.end() - 1, this->A->diag.begin());
}
cudaCheckError();
delete this->halo_rows;
delete this->halo_btl;
//set halo_rows and halo_btl to NULL to avoid a potential double free situation in the future
this->halo_rows = NULL;
this->halo_btl = NULL;
this->A->delProps(COO);
this->A->set_initialized(1);
//TODO: only do this if AMG_Config matrix_halo_exchange!=2
if (!insert) { this->A->computeDiagonal(); }
this->A->setView(OWNED);
}
// Unary function object for thrust calls: returns its argument plus a fixed
// constant chosen at construction time (usable on both host and device).
template<typename T>
class add_constant_op
{
        const T shift; // constant added to every input element
    public:
        add_constant_op(T value) : shift(value) {}
        __host__ __device__ T operator()(const T &input) const
        {
            return input + shift;
        }
};
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::obtain_shift_l2g_reordering(index_type n, I64Vector_d &l2g, IVector_d &p, IVector_d &q)
{
    /* Builds in q the permutation combining the diagonal-block shift with the
       local-to-global conversion of off-diagonal column indices.
       The halo entries of q are produced by exchanging the globally shifted
       inverse reordering p with the neighbors; this exchange is implicitly
       based on the local_to_global_map (l2g), since exchange_halo delivers
       exactly the halo elements we need, each shifted by the owning partition's
       starting global row index (part_offsets holds the prefix sum of rows per
       partition; part_offsets and part_offsets_h agree on device and host).
       This avoids materializing the global inverse-permutation vector used by
       the reference MATLAB code. */
    index_type tag = 1 * 133 + 3 * 7 + 0; //some random number for the tag
    //global row index at which this partition's rows begin
    index_type my_base = this->part_offsets[this->global_id()];
    q.resize(p.size());
    //q = p shifted into global numbering; then fetch halo entries from neighbors
    thrust::transform(p.begin(), p.end(), q.begin(), add_constant_op<index_type>(my_base));
    this->exchange_halo(q, tag);
    //the owned prefix of q is simply the shifted identity: q[i] = i + my_base
    thrust::sequence (q.begin(), q.begin() + n);
    thrust::transform(q.begin(), q.begin() + n, q.begin(), add_constant_op<index_type>(my_base));
    cudaCheckError();
}
/* Unpacks the reordered, distributed local matrix A into the caller-provided
   CSR arrays Bp/Bc/Bv: rows are restored to the original (mixed
   interior-boundary) order and column indices are converted to global
   numbering. Bp/Bc/Bv are assumed to be device buffers sized for the OWNED
   view of A -- TODO confirm against callers. */
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::unpack_partition(index_type *Bp, index_type *Bc, mat_value_type *Bv)
{
index_type l, n, nnz, offset;
index_type *ir;
index_type *Ap;
index_type *Ac;
mat_value_type *Av;
IVector q;
//some initializations: OWNED-view dimensions and raw pointers into A
this->A->getOffsetAndSizeForView(OWNED, &offset, &n);
this->A->getNnzForView(OWNED, &nnz);
l = this->inverse_renumbering.size();
ir = this->inverse_renumbering.raw();
Ap = this->A->row_offsets.raw();
Ac = this->A->col_indices.raw();
Av = this->A->values.raw();
//(i) reorder the matrix back (into mixed interior-boundary nodes)
//applies to rows and columns (out-of-place): A -> B using inverse_renumbering
reorder_partition<index_type, mat_value_type, true, true>
(n, nnz, Ap, Ac, Av, Bp, Bc, Bv, l, ir);
cudaCheckError();
//obtain reordering q that combines the shift of the diagonal block with the off-diagonal block indices conversion from local to global
this->obtain_shift_l2g_reordering(n, this->local_to_global_map, this->inverse_renumbering, q);
cudaCheckError();
//(ii) reorder the matrix back (shift the diagonal block and convert off-diagonal block column indices from local to global)
//applies columns only (in-place on B; template flags: no row reorder, map columns)
reorder_partition<index_type, mat_value_type, false, true>
(n, nnz, Bp, Bc, Bv, Bp, Bc, Bv, q.size(), q.raw());
cudaCheckError();
}
template <class TConfig>
void DistributedManagerBase<TConfig>::createNeighToDestPartMap(IVector_h &neigh_to_part, IVector_h &neighbors, IVector_h &destination_part, int num_neighbors)
{
    // For each of our fine-level neighbors, record the partition it is being
    // consolidated into: neigh_to_part[i] = destination_part[neighbors[i]].
    neigh_to_part.resize(num_neighbors);
    int n = 0;
    while (n < num_neighbors)
    {
        neigh_to_part[n] = destination_part[neighbors[n]];
        ++n;
    }
}
template <class TConfig>
void DistributedManagerBase<TConfig>::createConsolidatedNeighToPartMap(IVector_h &cons_neigh_to_part, IVector_h &neigh_to_part, int my_destination_part, IVector_h &destination_part, int &num_cons_neighbors)
{
    // Builds the sorted list of distinct coarse partitions our fine neighbors
    // are merged into, excluding the partition this rank itself merges into.
    // inputs:  neigh_to_part, my_destination_part
    // outputs: cons_neigh_to_part (initially uninitialized), num_cons_neighbors
    cons_neigh_to_part = neigh_to_part;
    thrust::sort(cons_neigh_to_part.begin(), cons_neigh_to_part.end());
    cudaCheckError();
    // Drop duplicate destinations (several fine neighbors can merge into the
    // same coarse partition)
    auto unique_end = thrust::unique(cons_neigh_to_part.begin(), cons_neigh_to_part.end());
    cons_neigh_to_part.erase(unique_end, cons_neigh_to_part.end());
    // Drop the entry matching our own destination partition, if present
    auto keep_end = thrust::remove_if(cons_neigh_to_part.begin(), cons_neigh_to_part.end(), is_my_part(my_destination_part));
    cons_neigh_to_part.erase(keep_end, cons_neigh_to_part.end());
    num_cons_neighbors = cons_neigh_to_part.size();
    cudaCheckError();
}
template <class TConfig>
void DistributedManagerBase<TConfig>::createNeighToConsNeigh(IVector_h &neigh_to_cons_neigh, IVector_h &cons_neigh_to_part, IVector_h &neigh_to_part, int my_destination_part, int &num_neighbors)
{
    // Translate each fine neighbor's destination partition into its position
    // inside the (sorted) consolidated-neighbor list via binary search.
    neigh_to_cons_neigh.resize(num_neighbors);
    thrust::lower_bound(cons_neigh_to_part.begin(), cons_neigh_to_part.end(), neigh_to_part.begin(), neigh_to_part.end(), neigh_to_cons_neigh.begin());
    cudaCheckError();
    // Fine neighbors merged into our own partition are not consolidated
    // neighbors at all (they were not found above): mark them with -1.
    for (int idx = 0; idx < num_neighbors; ++idx)
    {
        neigh_to_cons_neigh[idx] = (neigh_to_part[idx] == my_destination_part) ? -1 : neigh_to_cons_neigh[idx];
    }
}
template <class TConfig>
template <class IVector_hd>
void DistributedManagerBase<TConfig>::consB2Lmaps(std::vector<IVector_hd> &dest_coarse_B2L_maps, std::vector<IVector_hd> &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors)
{
    // Merge the per-fine-neighbor B2L (boundary-to-local) maps into one map per
    // coarse destination partition, then sort and deduplicate each merged map.
    //
    // dest_coarse_B2L_maps       [out] one merged, sorted, duplicate-free map per coarse neighbor
    // coarse_B2L_maps            [in]  B2L map per fine neighbor
    // fine_neigh_to_coarse_neigh [in]  fine neighbor -> coarse neighbor index,
    //                                  -1 when the fine neighbor merges into our own partition
    //
    // NOTE(review): removed dead locals `my_id` and the `max_size` computation
    // present in the original -- both were computed but never used.
    dest_coarse_B2L_maps.resize(num_coarse_neighbors);
    std::vector<int> dest_coarse_B2L_maps_scratch_sizes(num_coarse_neighbors, 0);
    // First pass over fine neighbors: accumulate the total size of each
    // destination map
    for (int i = 0; i < num_fine_neighbors; i++)
    {
        int k = fine_neigh_to_coarse_neigh[i];
        if (k != -1)
        {
            dest_coarse_B2L_maps_scratch_sizes[k] += coarse_B2L_maps[i].size();
        }
    }
    // Allocate each merged map; reset the size array so it can serve as a
    // running write offset in the next loop
    for (int k = 0; k < num_coarse_neighbors; k++)
    {
        dest_coarse_B2L_maps[k].resize( dest_coarse_B2L_maps_scratch_sizes[k] );
        dest_coarse_B2L_maps_scratch_sizes[k] = 0;
    }
    // Second pass: concatenate each fine map into its destination map
    for (int i = 0; i < num_fine_neighbors; i++)
    {
        int k = fine_neigh_to_coarse_neigh[i];
        if (k != -1)
        {
            int offset = dest_coarse_B2L_maps_scratch_sizes[k];
            thrust::copy(coarse_B2L_maps[i].begin(), coarse_B2L_maps[i].end(), dest_coarse_B2L_maps[k].begin() + offset);
            dest_coarse_B2L_maps_scratch_sizes[k] += coarse_B2L_maps[i].size();
        }
    }
    cudaCheckError();
    // Remove duplicates (aggregates in boundary that go to same merged partition)
    for (int i = 0; i < num_coarse_neighbors; i++)
    {
        int size = dest_coarse_B2L_maps[i].size();
        thrust::sort(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].begin() + size);
        index_type num_unique = thrust::unique(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].begin() + size) - dest_coarse_B2L_maps[i].begin();
        dest_coarse_B2L_maps[i].erase(dest_coarse_B2L_maps[i].begin() + num_unique, dest_coarse_B2L_maps[i].end());
    }
    cudaCheckError();
}
/* Computes, for this partition, its interior/boundary row offsets within the
   consolidated (merged) partition, plus the merged partition's total
   interior/boundary/overall row counts.
   Protocol: non-root partitions send their {interior, boundary} sizes to the
   root (tag 777) and receive back {interior offset, boundary offset, total
   interior, total boundary} (tag 778). The root gathers all children's sizes,
   forms cumulative sums, stores per-child counts in vertex_counts, and replies
   to every child (serving itself locally, without comms). */
template <class TConfig>
void DistributedManagerBase<TConfig>::computeConsolidatedOffsets(const int my_id, const int my_destination_part, const bool is_root_partition, const int num_interior_rows, const int num_boundary_rows, IVector_h_vector &vertex_counts, const IVector_h &parts_to_consolidate, const int num_parts_to_consolidate, int &interior_offset, int &boundary_offset, int &total_interior_rows_in_merged, int &total_boundary_rows_in_merged, int &total_rows_in_merged, DistributedComms<TConfig> *comms)
{
IVector_h my_offsets(4);
IVector_h my_sizes(2);
my_sizes[0] = num_interior_rows;
my_sizes[1] = num_boundary_rows;
if (!is_root_partition)
{
//Send number of interior and boundary nodes to root
comms->send_vector_async(my_sizes, my_destination_part, 777);
comms->recv_vector(my_offsets, my_destination_part, 778);
comms->send_vector_wait_all(my_sizes);
}
else
{
vertex_counts.resize(num_parts_to_consolidate);
IVector_h child_sizes(2);
IVector_h offsets_interior(num_parts_to_consolidate);
IVector_h offsets_boundary(num_parts_to_consolidate);
int count_int = 0;
int count_bdy = 0;
for (int i = 0; i < num_parts_to_consolidate; i++)
{
//the root's own sizes are taken locally instead of via comms
if (parts_to_consolidate[i] == my_id)
{
child_sizes = my_sizes;
}
else
{
comms->recv_vector(child_sizes, parts_to_consolidate[i], 777);
}
//Do a simple cumulative sum to determine total number of interior/boundary rows and their offsets on a per contributing partition basis
offsets_interior[i] = count_int;
offsets_boundary[i] = count_bdy;
count_int += child_sizes[0];
count_bdy += child_sizes[1];
//Save them
vertex_counts[i].resize(2);
vertex_counts[i][0] = child_sizes[0];
vertex_counts[i][1] = child_sizes[1];
}
for (int i = 0; i < num_parts_to_consolidate; i++)
{
//Send back to contributing partitions
IVector_h offsets_to_send(4);
offsets_to_send[0] = offsets_interior[i];
offsets_to_send[1] = offsets_boundary[i];
offsets_to_send[2] = count_int;
offsets_to_send[3] = count_bdy;
if (parts_to_consolidate[i] == my_id)
{
my_offsets = offsets_to_send;
}
else
{
comms->send_vector(offsets_to_send, parts_to_consolidate[i], 778); // cannot make async, rewriting internal buffer
}
}
}
//unpack the reply: boundary rows sit after ALL interior rows of the merged
//partition, hence the (total interior - my interior) correction below
interior_offset = my_offsets[0];
boundary_offset = my_offsets[1] + my_offsets[2] - num_interior_rows;
total_interior_rows_in_merged = my_offsets[2];
total_boundary_rows_in_merged = my_offsets[3];
total_rows_in_merged = my_offsets[2] + my_offsets[3];
}
// Consolidates coarse-level boundary-to-local (B2L) maps on root partitions.
//
// Non-root partitions send their coarse-neighbor count (tag 1111), the
// coarse-neighbor -> fine-partition mapping (2222), the per-neighbor boundary
// sizes (3333) and the B2L maps themselves (4444+i) to their destination
// (root) partition. Root partitions receive these from every fine partition
// being merged into them, build the merged coarse neighbor list (sorted,
// duplicates removed) and concatenate the incoming B2L maps per consolidated
// neighbor (final sorting/dedup of the maps happens in the caller).
//
// Outputs: num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_ids.
// Inputs:  dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh,
//          fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id,
//          my_destination_part, is_root_partition, num_coarse_neighbors.
template <class TConfig>
template <class IVector_hd>
void DistributedManagerBase<TConfig>::consB2LmapsOnRoot(int &num_consolidated_neighbors, std::vector<IVector_hd> &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, std::vector<IVector_hd> &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms)
{
    // TODO: it is possible to optimize exchanges, for example fuse recv_vector in recreating coarse neighbours
    if (my_destination_part != my_id)
    {
        // Not a root: send coarse neighbor list using fine indices and the corresponding boundary lists.
        IVector_h num_coarse_neigh(1);
        num_coarse_neigh[0] = num_coarse_neighbors;
        comms->send_vector_async(num_coarse_neigh, my_destination_part, 1111);
        comms->send_vector_async(coarse_neigh_to_fine_part, my_destination_part, 2222);
        comms->send_vector_async(num_bdy_per_coarse_neigh, my_destination_part, 3333);
        for (int i = 0; i < num_coarse_neighbors; i++)
        {
            comms->send_vector_async(dest_coarse_B2L_maps[i], my_destination_part, 4444 + i);
        }
        comms->send_vector_wait_all(num_coarse_neigh);
        comms->send_vector_wait_all(coarse_neigh_to_fine_part);
        comms->send_vector_wait_all(num_bdy_per_coarse_neigh);
        for (int i = 0; i < num_coarse_neighbors; i++)
        {
            comms->send_vector_wait_all(dest_coarse_B2L_maps[i]);
        }
    }
    if (is_root_partition)
    {
        // FIX: this was copy-constructed from fine_parts_to_consolidate (a list
        // of partition ids), silently relying on that vector having exactly
        // num_fine_parts_to_consolidate entries. Size-construct explicitly;
        // every entry is overwritten in the first loop below.
        IVector_h num_coarse_ids_from_part(num_fine_parts_to_consolidate);
        IVector_h_vector coarse_ids_from_part(num_fine_parts_to_consolidate);
        IVector_h_vector num_coarse_neigh_bdys_from_part(num_fine_parts_to_consolidate);
        // If root, receive sizes, and resize receive buffers.
        int total_num_coarse_ids = 0;
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            int current_part = fine_parts_to_consolidate[i];
            IVector_h temp(1);
            if (current_part != my_id)
            {
                comms->recv_vector(temp, current_part, 1111);
            }
            else
            {
                temp[0] = num_coarse_neighbors;
            }
            num_coarse_ids_from_part[i] = temp[0];
            coarse_ids_from_part[i].resize(temp[0]);
            num_coarse_neigh_bdys_from_part[i].resize(temp[0]);
            total_num_coarse_ids += temp[0];
        }
        // Create a neighbor list for the consolidated coarse matrix, by merging
        // coarse neighbor lists from partitions that are being merged.
        consolidated_coarse_ids.resize(total_num_coarse_ids);
        int count = 0;
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            int current_part = fine_parts_to_consolidate[i];
            // Get from each partition the coarse partition ids in their B2L maps.
            if (current_part != my_id)
            {
                comms->recv_vector(coarse_ids_from_part[i], current_part, 2222);
                comms->recv_vector(num_coarse_neigh_bdys_from_part[i], current_part, 3333);
            }
            else
            {
                coarse_ids_from_part[i] = coarse_neigh_to_fine_part;
                num_coarse_neigh_bdys_from_part[i] = num_bdy_per_coarse_neigh;
            }
            thrust::copy(coarse_ids_from_part[i].begin(), coarse_ids_from_part[i].end(), consolidated_coarse_ids.begin() + count);
            count += num_coarse_ids_from_part[i];
        }
        cudaCheckError();
        // Eliminate duplicates.
        thrust::sort(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end());
        cudaCheckError();
        consolidated_coarse_ids.erase(thrust::unique(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end()), consolidated_coarse_ids.end());
        cudaCheckError();
        num_consolidated_neighbors = consolidated_coarse_ids.size();
        // For each fine partition, map its coarse neighbor list into indices of
        // the (sorted, unique) consolidated neighbor list.
        IVector_h_vector coarse_ids_from_part_to_consolidated_neighbor(num_fine_parts_to_consolidate);
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            coarse_ids_from_part_to_consolidated_neighbor[i].resize(num_coarse_ids_from_part[i]);
            thrust::lower_bound(consolidated_coarse_ids.begin(), consolidated_coarse_ids.end(), coarse_ids_from_part[i].begin(), coarse_ids_from_part[i].end(), coarse_ids_from_part_to_consolidated_neighbor[i].begin());
        }
        cudaCheckError();
        // Map each coarse partition to new coarse ID.
        consolidated_B2L_maps.resize(num_consolidated_neighbors);
        IVector_h consolidated_B2L_maps_sizes(num_consolidated_neighbors);
        // Offset in the consolidated_B2L_maps.
        IVector_h_vector coarse_ids_offsets(num_fine_parts_to_consolidate);
        for (int i = 0; i < num_consolidated_neighbors; i++)
        {
            consolidated_B2L_maps_sizes[i] = 0;
        }
        // Compute the size of each consolidated_B2L_maps and offsets into it, where we will
        // receive the parts coming from partitions that are getting merged into this one.
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            coarse_ids_offsets[i].resize(num_coarse_ids_from_part[i]);
            for (int j = 0; j < num_coarse_ids_from_part[i]; j++)
            {
                // (unused local `coarse_id` removed)
                int k = num_coarse_neigh_bdys_from_part[i][j];
                coarse_ids_offsets[i][j] = consolidated_B2L_maps_sizes[ coarse_ids_from_part_to_consolidated_neighbor[i][j] ];
                consolidated_B2L_maps_sizes[ coarse_ids_from_part_to_consolidated_neighbor[i][j] ] += k;
            }
        }
        for (int i = 0; i < num_consolidated_neighbors; i++)
        {
            consolidated_B2L_maps[i].resize(consolidated_B2L_maps_sizes[i]);
        }
        // Receive the B2L maps from each child partition, concatenate them (gets sorted outside).
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            int current_part = fine_parts_to_consolidate[i];
            for (int j = 0; j < num_coarse_ids_from_part[i]; j++)
            {
                int my_coarse_neigh = coarse_ids_from_part_to_consolidated_neighbor[i][j];
                int offset = coarse_ids_offsets[i][j];
                if (current_part != my_id)
                {
                    comms->recv_vector( consolidated_B2L_maps[my_coarse_neigh], current_part, 4444 + j, offset, num_coarse_neigh_bdys_from_part[i][j]); //Need to do proper tagging here, otherwise messages from the same source would get mixed up
                }
                else
                {
                    thrust::copy(dest_coarse_B2L_maps[j].begin(), dest_coarse_B2L_maps[j].end(), consolidated_B2L_maps[my_coarse_neigh].begin() + offset);
                }
            }
        }
        cudaCheckError();
    }
}
// Host-vector overload: forwards to the templated consAndRenumberHalos
// implementation (see that function for the full protocol description).
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateAndRenumberHalos(IVector_h &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_ids, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms)
{
consAndRenumberHalos(aggregates, manager_halo_offsets, halo_offsets, neighbors, num_fine_neighbors, consolidated_coarse_ids, num_consolidated_neighbors, destination_part, my_destination_part, is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, num_all_aggregates, comms);
}
// Device-vector overload: forwards to the templated consAndRenumberHalos
// implementation (see that function for the full protocol description).
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateAndRenumberHalos(IVector_d &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_ids, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms)
{
consAndRenumberHalos(aggregates, manager_halo_offsets, halo_offsets, neighbors, num_fine_neighbors, consolidated_coarse_ids, num_consolidated_neighbors, destination_part, my_destination_part, is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, num_all_aggregates, comms);
}
// Consolidates and renumbers the halo portion of the `aggregates` vector when
// several fine partitions merge into one coarse (root) partition.
// Each partition identifies its "non-merge" neighbors (neighbors whose
// destination differs from its own), ships the corresponding halo aggregate
// ranges to its root, and the root assigns each distinct halo aggregate a new
// consolidated index (via flag + exclusive-scan over a scratch range) before
// sending the renumbered ranges back. Also fills halo_offsets and updates
// num_all_aggregates with the grand total. The worked EXAMPLE 2 below traces
// the exchange; statement order of sends/recvs is part of the protocol.
template <class TConfig>
template <class IVector_hd>
void DistributedManagerBase<TConfig>::consAndRenumberHalos(IVector_hd &aggregates, const IVector_h &manager_halo_offsets, IVector_h &halo_offsets, const IVector_h &neighbors, int num_fine_neighbors, const IVector_h &consolidated_coarse_neigh_to_fine_part, int num_consolidated_neighbors, const IVector_h &destination_part, int my_destination_part, bool is_root_partition, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int num_parts, int my_id, int total_rows_in_merged, int &num_all_aggregates, DistributedComms<TConfig> *comms)
{
/*
* EXAMPLE 2
This example is independent from the previous ones.
Say partition 0 and 1 are merging (into 0) partition 0 is neighbors with 1,2,3 and partition 1 is neighbors with 0,3,4
Partitions 3 and 4 are merging (into partition 3) and partition 2 is not merging with anyone.
This example details the renumbering of halo indices on partition 0 and partition 1.
aggregates on partition 0:
[(fine interior nodes) (fine boundary nodes) (fine halo from part 1) (fine halo from part 2) (fine halo from part 3)]
[(fine interior nodes) (fine boundary nodes) (13 13 15) (12 15 17) (14 16 18)]
aggregates on partition 1:
[(fine interior nodes) (fine boundary nodes) (fine halo from part 0) (fine halo from part 3) (fine halo from part 4)]
[(fine interior nodes) (fine boundary nodes) (14 16 17) (18 19 19) (15 15 17)]
manager_halo_offsets on partition 0:
[22 25 28 31]
manager_halo_offsets on partition 1:
[20 23 26 29]
halo_offsets on both partitions are uninitialised: [0 0 0] and [0 0]
neighbors on partition 0: [1 2 3] partition 1: [0 3 4]
num_fine_neighbors partition 0: 3 partition 1: 3
consolidated_coarse_neigh_to_fine_part partition 0: [2 3] partition 1: [3]
num_consolidated_neighbors partition 0: 2 partition 1: 1
destination_part [0 0 2 3 3]
my_destination_part partition 0: 0 partition 1: 0
is_root_partition partition 0: true partition 1: false
fine_parts_to_consolidate partition 0: [0 1]
num_fine_parts_to_consolidate partition 0: 2
num_parts 5
my_id partition 0: 0 partition 1: 1
total_rows_in_merged partition 0 and 1: 24 (=sum of the two below)
num_all_aggregates partition partition 0: 13 partition 1: 11 - will be updated with the number of halo aggregates
*/
//
// Step 9.2 - come up with non-merge lists
//
int num_fine_nonmerge_neighbors;// = fine_nonmerge_neighbors.size();
//Number of neighbors we are not merging with
num_fine_nonmerge_neighbors = 0;
for (int i = 0 ; i < num_fine_neighbors; i++)
{
if (destination_part[neighbors[i]] != my_destination_part)
{
num_fine_nonmerge_neighbors++;
}
}
IVector_h halo_sizes(num_fine_nonmerge_neighbors);
IVector_h fine_nonmerge_neigh_to_cons_fine_part(num_fine_nonmerge_neighbors);
IVector_h fine_nonmerge_neighbor_to_fine_neighbor(num_fine_nonmerge_neighbors);
num_fine_nonmerge_neighbors = 0;
// Second pass: record, for each non-merge neighbor, its halo size, its index
// in the original neighbor list, and the consolidated partition it maps to.
for (int i = 0 ; i < num_fine_neighbors; i++)
{
if (destination_part[neighbors[i]] != my_destination_part)
{
halo_sizes[num_fine_nonmerge_neighbors] = manager_halo_offsets[i + 1] - manager_halo_offsets[i];
fine_nonmerge_neighbor_to_fine_neighbor[num_fine_nonmerge_neighbors] = i;
fine_nonmerge_neigh_to_cons_fine_part[num_fine_nonmerge_neighbors] = destination_part[neighbors[i]];
num_fine_nonmerge_neighbors++;
}
}
/*
* EXAMPLE 2
num_fine_nonmerge_neighbors partition 0: 2 partition 1: 2
fine_nonmerge_neighbor_to_fine_neighbor partition 0: [1 2] partition 1: [1 2]
fine_nonmerge_neigh_to_cons_fine_part partition 0: [2 3] partition 1: [3 3]
halo_sizes partition 0: [3 3] partition 1: [3 3]
*/
//Send them to root along with the halo parts of the aggregates vector
if (!is_root_partition)
{
IVector_h num_fine_nonmerge_neigh(1);
num_fine_nonmerge_neigh[0] = num_fine_nonmerge_neighbors;
// TODO: async? might be faster.
comms->send_vector(num_fine_nonmerge_neigh, my_destination_part, 1111);
comms->send_vector(halo_sizes, my_destination_part, 2222);
comms->send_vector(fine_nonmerge_neigh_to_cons_fine_part, my_destination_part, 3333);
// Here check l2h_identity flag and act accordingly
for (int i = 0; i < num_fine_nonmerge_neighbors; i++)
{
comms->send_vector_async(aggregates, my_destination_part, 4444 + i, manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[i]], halo_sizes[i]) ;
}
//comms->send_vector_wait_all(num_fine_nonmerge_neigh);
//comms->send_vector_wait_all(halo_sizes);
//comms->send_vector_wait_all(fine_nonmerge_neigh_to_cons_fine_part);
comms->send_vector_wait_all(aggregates);
/*
* EXAMPLE 2
Partition 1 sends to partition 0:
num_fine_nonmerge_neigh 2
halo_sizes [3 3]
fine_nonmerge_neigh_to_cons_fine_part [3 3]
for loop: sends two pieces: [(18 19 19)] [(15 15 17)]
*/
}
if (is_root_partition)
{
//
// Step 9.3 Root receives this info, creates metadata
//
std::vector<VecInt_t> num_fine_nonmerge_neigh_array(num_fine_parts_to_consolidate);
IVector_h_vector halo_sizes_array(num_fine_parts_to_consolidate);
IVector_h_vector fine_nonmerge_neigh_to_cons_fine_part_array(num_fine_parts_to_consolidate);
std::vector<std::vector<IVector> > fine_halo_aggregates_to_root_array(num_fine_parts_to_consolidate);
std::vector<VecInt_t> min_index_coarse_halo(num_consolidated_neighbors, 0x7FFFFFFF);
std::vector<VecInt_t> max_index_coarse_halo(num_consolidated_neighbors, 0);
std::vector<VecInt_t> fine_part_to_consolidated_neighbor(num_parts, -1);
for (int i = 0; i < num_consolidated_neighbors; i++)
{
fine_part_to_consolidated_neighbor[consolidated_coarse_neigh_to_fine_part[i]] = i;
}
/*
* EXAMPLE 2
everything from here on is for partition 0, since that is the root partition
fine_part_to_consolidated_neighbor [-1 -1 0 1 -1]
*/
for (int i = 0; i < num_fine_parts_to_consolidate; i++)
{
int current_part = fine_parts_to_consolidate[i];
IVector_h temp(1);
if (current_part != my_id)
{
comms->recv_vector(temp, current_part, 1111);
}
else
{
temp[0] = num_fine_nonmerge_neighbors;
}
num_fine_nonmerge_neigh_array[i] = temp[0];
halo_sizes_array[i].resize(temp[0]);
fine_nonmerge_neigh_to_cons_fine_part_array[i].resize(temp[0]);
fine_halo_aggregates_to_root_array[i].resize(temp[0]);
if (current_part != my_id)
{
comms->recv_vector(halo_sizes_array[i], current_part, 2222);
}
else
{
halo_sizes_array[i] = halo_sizes;
}
if (current_part != my_id)
{
comms->recv_vector(fine_nonmerge_neigh_to_cons_fine_part_array[i], current_part, 3333);
}
else
{
fine_nonmerge_neigh_to_cons_fine_part_array[i] = fine_nonmerge_neigh_to_cons_fine_part;
}
//Receive the halo regions
for (int j = 0; j < temp[0]; j++)
{
fine_halo_aggregates_to_root_array[i][j].resize(halo_sizes_array[i][j]);
if (current_part != my_id)
{
comms->recv_vector(fine_halo_aggregates_to_root_array[i][j], current_part, 4444 + j);
}
else
{
//HERE
thrust::copy(aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[j]],
aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[j]] + halo_sizes[j],
fine_halo_aggregates_to_root_array[i][j].begin()); //TODO: not do this copying around on the root
}
// NOTE(review): these macros are fragile — unparenthesized arguments and a
// trailing semicolon. They only work because every use here is a full
// `lhs = MIN(a,b);` statement. They are also not #undef'd, so they leak
// into the rest of the translation unit. Consider std::min/std::max.
#define MIN(a,b) a<b?a:b;
#define MAX(a,b) a>b?a:b;
//Find minimum and maximum halo indices as not to allocate too much scratch space later
int min_index = thrust::reduce(fine_halo_aggregates_to_root_array[i][j].begin(), fine_halo_aggregates_to_root_array[i][j].end(), int(0x7FFFFFFF), thrust::minimum<int>());
int max_index = thrust::reduce(fine_halo_aggregates_to_root_array[i][j].begin(), fine_halo_aggregates_to_root_array[i][j].end(), int(0), thrust::maximum<int>());
min_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]] = MIN((int)min_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]], min_index);
max_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]] = MAX((int)max_index_coarse_halo[fine_part_to_consolidated_neighbor[fine_nonmerge_neigh_to_cons_fine_part_array[i][j]]], max_index);
}
}
cudaCheckError();
/*
* EXAMPLE 2
num_fine_nonmerge_neigh_array = [2 2]
halo_sizes_array = [[3 3][3 3]]
fine_nonmerge_neigh_to_cons_fine_part_array[][] = [[2 3][3 3]]
fine_halo_aggregates_to_root_array[from][to][fine halo vertex] [[[12 15 17][14 16 18]]
[[18 19 19][15 15 17]]]
min_index_coarse_halo[12 14]
max_index_coarse_halo[17 19]
*/
halo_offsets[0] = total_rows_in_merged;
//Now we have all the halo nodes, let's renumber them.
int min_index = thrust::reduce(min_index_coarse_halo.begin(), min_index_coarse_halo.end(), int(0x7FFFFFFF), thrust::minimum<int>());
int max_index = thrust::reduce(max_index_coarse_halo.begin(), max_index_coarse_halo.end(), int(0), thrust::maximum<int>());
cudaCheckError();
//
// Step 9.4 compute halo indices on root nodes
//
// Scratch covers the global [min_index, max_index] range (+1 slot so the
// exclusive scan's last element holds the running total); size 1 when there
// are no consolidated neighbors so the allocation below stays valid.
int scratch_size;
if (num_consolidated_neighbors == 0)
{
scratch_size = 1;
}
else
{
scratch_size = max_index - min_index + 2;
}
IVector scratch(scratch_size);
for (int i = 0; i < num_consolidated_neighbors; i++)
{
thrust::fill(scratch.begin(), scratch.end(), 0);
int dest_part = consolidated_coarse_neigh_to_fine_part[i];
//Flag halo indices that occur for a specific coarse neighbor
for (int j = 0; j < num_fine_parts_to_consolidate; j++)
{
for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++)
{
if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part)
{
int size = halo_sizes_array[j][k];
this->flag_halo_ids(size, scratch, fine_halo_aggregates_to_root_array[j][k], min_index_coarse_halo[i], max_index, min_index);
}
}
}
thrust::exclusive_scan(scratch.begin(), scratch.end(), scratch.begin(), halo_offsets[i]); //renumber them with the proper offset into our halo
halo_offsets[i + 1] = scratch[scratch.size() - 1];
//now read them back
for (int j = 0; j < num_fine_parts_to_consolidate; j++)
{
for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++)
{
if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part)
{
int size = halo_sizes_array[j][k];
// NOTE(review): block_size/num_blocks below are dead code — the launch
// configuration is recomputed inside this->read_halo_ids().
int block_size = 128;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1);
this->read_halo_ids(size, scratch, fine_halo_aggregates_to_root_array[j][k], min_index_coarse_halo[i]);
//and send them back to contributing partitions
cudaDeviceSynchronize(); //TODO: don't need to synchronize when using GPUDirect
int current_part = fine_parts_to_consolidate[j];
int tag = 4444 + dest_part;
if (current_part != my_id)
{
comms->send_vector_async(fine_halo_aggregates_to_root_array[j][k], current_part, tag); //!!!!: we are sending them back not in sequential order, need tags!!!!
}
else
{
thrust::copy(fine_halo_aggregates_to_root_array[j][k].begin(), fine_halo_aggregates_to_root_array[j][k].end(), aggregates.begin() + manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[k]]);
}
}
}
}
/*
* EXAMPLE 2
the array that is sent back in pieces:
fine_halo_aggregates_to_root_array[from][to][fine halo vertex] [[[24 25 26][27 29 31]]
[[31 32 32][28 28 30]]]
halo_offsets = [24 27 33]
*/
} // Loop over consolidated neighbors
cudaCheckError();
// Wait for sends to have completed (this is to prevent fine_halo_aggregates_to_root_array to be destroyed before send has finished)
for (int i = 0; i < num_consolidated_neighbors; i++)
{
int dest_part = consolidated_coarse_neigh_to_fine_part[i];
for (int j = 0; j < num_fine_parts_to_consolidate; j++)
{
for (int k = 0; k < num_fine_nonmerge_neigh_array[j]; k++)
{
if (fine_nonmerge_neigh_to_cons_fine_part_array[j][k] == dest_part)
{
int current_part = fine_parts_to_consolidate[j];
if (current_part != my_id)
{
comms->send_vector_wait_all(fine_halo_aggregates_to_root_array[j][k]);
}
}
}
}
} // Loop over consolidated neighbors
//Send total number of rows in the aggregated matrix
for (int i = 0; i < num_fine_parts_to_consolidate; i++)
{
int current_part = fine_parts_to_consolidate[i];
IVector_h total_rows(1);
total_rows[0] = halo_offsets[num_consolidated_neighbors];
if (current_part != my_id)
{
comms->send_vector(total_rows, current_part, 5555);
}
else
{
num_all_aggregates = total_rows[0];
}
}
} // If is root partition
// Non-root partitions receive the renumbered halo ranges back (tagged by
// destination coarse partition) plus the grand total row count.
if (!is_root_partition)
{
for (int i = 0; i < num_fine_nonmerge_neighbors; i++)
{
int tag = 4444 + fine_nonmerge_neigh_to_cons_fine_part[i];
comms->recv_vector(aggregates, my_destination_part, tag, manager_halo_offsets[fine_nonmerge_neighbor_to_fine_neighbor[i]], halo_sizes[i]);
}
IVector_h total_rows(1);
comms->recv_vector(total_rows, my_destination_part, 5555);
num_all_aggregates = total_rows[0];
}
/*
* EXAMPLE 2
num_all_aggregates = 33 (both partitions 0 and 1
*/
}
// Shares a device allocation owned by the root partition with its child
// partitions (same node) via CUDA IPC.
// Root: exports `ptr` as an IPC memory handle and sends the raw handle bytes
// to every other partition in parts_to_consolidate (tag 456).
// Child: receives the handle from `my_root_partition` and replaces `ptr`
// with the locally mapped alias of the root's allocation.
template <class TConfig>
void DistributedManagerBase<TConfig>::ipcExchangePtr(void *&ptr, bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_root_partition, int my_id, DistributedComms<TConfig> *comms)
{
    cudaIpcMemHandle_t handle;
    if (is_root_partition)
    {
        cudaIpcGetMemHandle(&handle, ptr);
        for (int i = 0; i < num_parts_to_consolidate; i++)
        {
            int current_part = parts_to_consolidate[i];
            if (current_part != my_id)
            {
                comms->send_raw_data(&handle, sizeof(handle), current_part, 456);
            }
        }
    }
    else
    {
        comms->recv_raw_data(&handle, sizeof(handle), my_root_partition, 456);
        // FIX: the return status used to be stored in an unused local (`err`)
        // and was never inspected; the dead variable is removed.
        // NOTE(review): a failure here leaves `ptr` unmodified — consider
        // checking the status and reporting via the project's error mechanism.
        cudaIpcOpenMemHandle((void **) &ptr, handle, cudaIpcMemLazyEnablePeerAccess);
    }
}
// Makes the root partition wait until the GPU work recorded by each child
// partition has completed, using CUDA interprocess events.
// Every rank creates an interprocess event and records it on the current
// stream; children send their event's IPC handle to the root
// (tag 987 + child id), and the root opens each handle and synchronizes on it.
// NOTE(review): neither `event` nor the events opened from child handles are
// ever destroyed here — presumably a deliberate choice given cross-process
// lifetime constraints of IPC events, but it looks like a resource leak;
// confirm before adding cudaEventDestroy calls.
template <class TConfig>
void DistributedManagerBase<TConfig>::ipcWaitForChildren(bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_destination_part, int my_id, DistributedComms<TConfig> *comms)
{
cudaEvent_t event;
cudaIpcEventHandle_t event_handle;
// Interprocess events require the cudaEventDisableTiming flag.
cudaEventCreate(&event, cudaEventDisableTiming | cudaEventInterprocess);
cudaIpcGetEventHandle( &event_handle, event);
// Each rank record the event
cudaEventRecord(event);
if (is_root_partition)
{
std::vector<cudaEvent_t> child_events(num_parts_to_consolidate);
std::vector<cudaIpcEventHandle_t> child_event_handles(num_parts_to_consolidate);
// Root partition receives event_handles from child and stores in child_event_handles
for (int i = 0; i < num_parts_to_consolidate; i++)
{
int current_part = parts_to_consolidate[i];
if (current_part != my_id)
{
comms->recv_raw_data(&(child_event_handles[i]), sizeof(cudaIpcEventHandle_t), current_part, 987 + current_part);
cudaIpcOpenEventHandle(&child_events[i], child_event_handles[i]);
}
}
// Block until every child's recorded work has finished.
for (int i = 0; i < num_parts_to_consolidate; i++)
{
if (parts_to_consolidate[i] != my_id)
{
cudaEventSynchronize(child_events[i]);
}
}
}
else
{
// Child: hand our event handle to the root so it can wait on our work.
comms->send_raw_data(&event_handle, sizeof(cudaIpcEventHandle_t), my_destination_part, 987 + my_id);
}
}
// Inverse of ipcWaitForChildren: child partitions wait until the GPU work
// recorded by their root partition has completed.
// The root creates an interprocess event, records it, and sends its IPC
// handle to each child (tag 988 + child id); each child opens the handle and
// synchronizes on the root's event.
// NOTE(review): on the child path the locally created `event` is immediately
// overwritten by cudaIpcOpenEventHandle, so the created event is never used
// or destroyed; no event is destroyed on either path — confirm whether this
// leak is intentional before changing it.
template <class TConfig>
void DistributedManagerBase<TConfig>::ipcWaitForRoot(bool is_root_partition, int num_parts_to_consolidate, IVector_h &parts_to_consolidate, int my_destination_part, int my_id, DistributedComms<TConfig> *comms)
{
cudaEvent_t event;
cudaIpcEventHandle_t event_handle;
// Interprocess events require the cudaEventDisableTiming flag.
cudaEventCreate(&event, cudaEventDisableTiming | cudaEventInterprocess);
if (is_root_partition)
{
cudaIpcGetEventHandle( &event_handle, event);
// Root records the event
cudaEventRecord(event);
// Root partition sends event_handles to child
for (int i = 0; i < num_parts_to_consolidate; i++)
{
int current_part = parts_to_consolidate[i];
if (current_part != my_id)
{
comms->send_raw_data(&event_handle, sizeof(event_handle), current_part, 988 + current_part);
}
}
}
else
{
// Child: receive the root's handle, open it, and block on the root's work.
comms->recv_raw_data(&event_handle, sizeof(event_handle), my_destination_part, 988 + my_id);
cudaIpcOpenEventHandle(&event, event_handle);
cudaEventSynchronize(event);
}
}
// Device-path wrapper: launches read_halo_ids_kernel over `size` halo
// aggregate entries, one thread per entry, with the grid capped at
// AMGX_GRID_MAX_SIZE blocks.
template <class TConfig>
void DistributedManagerBase<TConfig>::read_halo_ids(int size, IVector_d &scratch, IVector_d &halo_aggregates, VecInt_t min_index_coarse_halo)
{
    const int threads_per_block = 128;
    const int grid_dim = min(AMGX_GRID_MAX_SIZE, (size - 1) / threads_per_block + 1);
    read_halo_ids_kernel<<<grid_dim, threads_per_block>>>(scratch.raw(), halo_aggregates.raw(), min_index_coarse_halo, size);
    cudaCheckError();
}
// Host-vector overload: not supported — fine-level consolidation's halo
// renumbering is implemented only for device vectors.
template <class TConfig>
void DistributedManagerBase<TConfig>::read_halo_ids(int size, IVector_h &scratch, IVector_h &halo_aggregates, VecInt_t min_index_coarse_halo)
{
FatalError("read_halo_ids not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED);
}
// Device-path wrapper: launches flag_halo_ids_kernel over `size` halo
// aggregate entries (one thread per entry, grid capped at AMGX_GRID_MAX_SIZE),
// passing the scratch-range width (max_index - min_index + 1) to the kernel.
template <class TConfig>
void DistributedManagerBase<TConfig>::flag_halo_ids(int size, IVector_d &scratch, IVector_d &halo_aggregates, VecInt_t min_index_coarse_halo, int max_index, int min_index)
{
    const int threads_per_block = 128;
    const int grid_dim = min(AMGX_GRID_MAX_SIZE, (size - 1) / threads_per_block + 1);
    flag_halo_ids_kernel<<<grid_dim, threads_per_block>>>(scratch.raw(), halo_aggregates.raw(), min_index_coarse_halo, size, max_index - min_index + 1);
    cudaCheckError();
}
// Host-vector overload: not supported — fine-level consolidation's halo
// flagging is implemented only for device vectors.
template <class TConfig>
void DistributedManagerBase<TConfig>::flag_halo_ids(int size, IVector_h &scratch, IVector_h &halo_aggregates, VecInt_t min_index_coarse_halo, int max_index, int min_index)
{
FatalError("flag_halo_ids not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED);
}
// Host specialization: fine-level consolidation requires the device path
// (see the AMGX_device specialization below).
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::consolidateAndUploadAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &A)
{
FatalError("Fine level consolidation not implemented on host yet", AMGX_ERR_NOT_IMPLEMENTED);
}
// Propagates solve results from each fine-level-consolidation root partition
// to the partitions it consolidated: iteration count (tag 245), optionally the
// flattened residual history (tag 246), and the solve status (tag 247).
// No-op when fine-level consolidation is not active. Non-root partitions
// overwrite num_iters, res_history and status with the root's values.
template<class TConfig>
void DistributedManagerBase<TConfig>::exchangeSolveResultsConsolidation(int &num_iters, std::vector<PODVector_h> &res_history, AMGX_STATUS &status, bool store_res_history)
{
int bsize = this->A->get_block_size();
PODVector_h res_history_tmp;
if (!m_is_fine_level_consolidated)
{
return;
}
else
{
int my_id = this->getFineLevelComms()->get_global_id();
IVector_h my_num_iters(1);
if (m_is_fine_level_root_partition)
{
my_num_iters[0] = num_iters;
if (store_res_history)
{
// Pack the res_history vector into array
// Flattened layout: entry (i, j) -> res_history_tmp[i * bsize + j].
res_history_tmp.resize( (num_iters + 1)*bsize);
for (int i = 0; i < num_iters + 1; i++)
{
for (int j = 0; j < bsize; j++)
{
res_history_tmp[i * bsize + j] = res_history[i][j];
}
}
}
for (int i = 0; i < m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = m_fine_level_parts_to_consolidate[i];
if (my_id != current_part)
{
getFineLevelComms()->send_vector_async(my_num_iters, current_part, 245);
if (store_res_history)
{
getFineLevelComms()->send_vector_async(res_history_tmp, current_part, 246);
}
}
}
// Status is sent as raw bytes in a separate pass.
for (int i = 0; i < m_num_fine_level_parts_to_consolidate; i++)
{
int current_part = m_fine_level_parts_to_consolidate[i];
if (my_id != current_part)
{
getFineLevelComms()->send_raw_data(&status, sizeof(status), current_part, 247);
}
}
// Ensure async sends complete before their buffers go out of scope.
getFineLevelComms()->send_vector_wait_all(my_num_iters);
if (store_res_history)
{
getFineLevelComms()->send_vector_wait_all(res_history_tmp);
}
}
else
{
// Store num_iters
getFineLevelComms()->recv_vector(my_num_iters, m_my_fine_level_destination_part, 245);
num_iters = my_num_iters[0];
if (store_res_history)
{
// Fill res_history vector
// Unpack the flattened (num_iters + 1) x bsize buffer received from the root.
res_history.resize(num_iters + 1);
res_history_tmp.resize( (num_iters + 1)*bsize);
getFineLevelComms()->recv_vector(res_history_tmp, m_my_fine_level_destination_part, 246);
for (int i = 0; i < num_iters + 1; i++)
{
res_history[i].resize(bsize);
for (int j = 0; j < bsize; j++)
{
res_history[i][j] = res_history_tmp[i * bsize + j];
}
}
}
getFineLevelComms()->recv_raw_data(&status, sizeof(status), m_my_fine_level_destination_part, 247);
}
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::consolidateAndUploadAll(int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag, Matrix<TConfig> &in_A)
{
this->A = &in_A;
this->createComms(this->A->getResources()); //refresh comms
DistributedComms<TConfig> *comms = this->getComms();
int my_id = comms->get_global_id();
int num_parts = comms->get_num_partitions();
int num_rings = this->num_halo_rings();
int num_neighbors = this->neighbors.size();
// All partitions have to call this, otherwise it fails
// Step 1: Figure out which partition should be consolidated together based on their host_name and their PCI-E slot ID
IVector_h destination_part(num_parts);
this->computeDestinationPartitionsWithCons(my_id, num_parts, destination_part, comms);
int my_destination_part = destination_part[my_id];
// Check if I'm root partition and how many msgs I will receive
bool is_root_partition = false;
int num_parts_to_consolidate = 0;
for (int i = 0; i < num_parts; i++)
{
if (destination_part[i] == my_id)
{
is_root_partition = true;
num_parts_to_consolidate++;
}
}
if (my_destination_part >= num_parts)
{
FatalError("During consolidation, sending data to partition that doesn't exist", AMGX_ERR_NOT_IMPLEMENTED);
}
// Create cons_part_to_part map
IVector_h cons_part_to_part = destination_part;
thrust::sort(cons_part_to_part.begin(), cons_part_to_part.end());
cudaCheckError();
cons_part_to_part.erase(thrust::unique(cons_part_to_part.begin(), cons_part_to_part.end()), cons_part_to_part.end());
cudaCheckError();
int num_cons_partitions = cons_part_to_part.size();
// If number of consolidated partitions is the same as number of partitions, simply call uploadAll
if (num_cons_partitions == num_parts)
{
this->initializeUploadReorderAll(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag, *(this->A));
this->m_is_fine_level_consolidated = false;
return;
}
if (is_root_partition)
{
this->A->getResources()->expandRootPool();
}
this->m_is_fine_level_consolidated = true;
if (num_rings != 1)
{
FatalError("num_rings > 1 not supported in fine_level consolidation", AMGX_ERR_NOT_IMPLEMENTED);
}
// Fill with b2l_maps
IVector_h_vector B2L_maps_tmp;
B2L_maps_tmp.resize(num_neighbors);
for (int i = 0; i < num_neighbors; i++)
{
B2L_maps_tmp[i] = this->cached_B2L_maps[i];
}
bool useCudaIpc = this->m_use_cuda_ipc_consolidation;
mat_value_type *data_hd = NULL;
mat_value_type *diag_hd = NULL;
int *col_indices_hd = NULL;
int data_alloc = 0;
int diag_alloc = 0;
int col_alloc = 0;
col_indices_hd = (int *) this->getDevicePointerForData((void *)col_indices, nnz * block_dimx * block_dimy * sizeof(int), &col_alloc);
data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data, nnz * block_dimx * block_dimy * sizeof(mat_value_type), &data_alloc);
if (diag != NULL)
{
diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag, nnz * block_dimx * block_dimy * sizeof(mat_value_type), &diag_alloc);
}
// Copy the original row_offsets array (this is required when replacing coefficients
this->m_old_row_offsets_CONS.resize(n + 1);
cudaMemcpy(this->m_old_row_offsets_CONS.raw(), row_ptrs, (n + 1)*sizeof(int), cudaMemcpyDefault);
cudaCheckError();
this->m_old_nnz_CONS = nnz;
// This function:
// Creates fine level consolidated matrices
// Modifies the btl_maps, lth_maps
// Create part_to_cons_part map
IVector_h part_to_cons_part(num_parts);
thrust::lower_bound(cons_part_to_part.begin(), cons_part_to_part.end(), destination_part.begin(), destination_part.end(), part_to_cons_part.begin());
cudaCheckError();
IVector_h neigh_to_part;
this->createNeighToDestPartMap(neigh_to_part, this->neighbors, destination_part, num_neighbors);
IVector_h cons_neigh_to_part;
int num_cons_neighbors;
this->createConsolidatedNeighToPartMap(cons_neigh_to_part, neigh_to_part, my_destination_part, destination_part, num_cons_neighbors);
IVector_h neigh_to_cons_neigh;
this->createNeighToConsNeigh( neigh_to_cons_neigh, cons_neigh_to_part, neigh_to_part, my_destination_part, num_neighbors);
// ---------------------------------------
// MERGE B2L MAPS BASED ON DEST PARTITION
// ---------------------------------------
IVector_h_vector dest_B2L_maps;
this->consolidateB2Lmaps(dest_B2L_maps, B2L_maps_tmp, neigh_to_cons_neigh, num_cons_neighbors, num_neighbors);
// ------------------------------------
// Renumber interior and boundary rows
// ------------------------------------
int num_interior_rows;
int num_boundary_rows;
IVector_h renumbering;
this->createAggregatesRenumbering(renumbering, dest_B2L_maps, n, num_cons_neighbors, num_interior_rows, num_boundary_rows, num_rings);
// --------------------------------------------------
// Create list of destination parts to consolidate
// --------------------------------------------------
// Store whether or not this is a root partition on fine level
IVector_h parts_to_consolidate;
parts_to_consolidate.resize(num_parts_to_consolidate);
int count = 0;
for (int i = 0; i < num_parts; i++)
{
if (destination_part[i] == my_id)
{
parts_to_consolidate[count] = i;
count++;
}
}
// ---------------------------------------------------------------------
// Each partition computes its offset for its interior and boundary nodes
// ---------------------------------------------------------------------
IVector_h_vector vertex_counts;
int interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged;
int total_rows_in_merged;
this->computeConsolidatedOffsets(my_id, my_destination_part, is_root_partition, num_interior_rows, num_boundary_rows, vertex_counts, parts_to_consolidate, num_parts_to_consolidate, interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged, total_rows_in_merged, comms);
// -----------------------------------
// Each partition renumber it's rows
// -----------------------------------
int total_num_halos = 0;
// Pack new bdy_ids
for (int i = 0; i < num_neighbors; i++)
{
total_num_halos += this->cached_L2H_maps[i].size();
}
IVector_h row_ids(n + total_num_halos, -1);
this->m_row_ids_CONS.resize(n + total_num_halos);
// Renumber the interior and boundary rows
for (int i = 0; i < n; i++)
{
int new_id;
if (renumbering.size() == 0)
{
new_id = i;
}
else
{
new_id = renumbering[i];
}
new_id += ((new_id >= num_interior_rows) ? boundary_offset : interior_offset);
row_ids[i] = new_id;
}
for (int i = 0; i < num_cons_neighbors; i++)
{
thrust::transform(dest_B2L_maps[i].begin(),
dest_B2L_maps[i].end(),
thrust::constant_iterator<index_type>(boundary_offset),
dest_B2L_maps[i].begin(),
thrust::plus<index_type>());
}
cudaCheckError();
// -------------------------------------------------
// Send dest_B2L_maps to root partitions
// ------------------------------------------------
IVector_h num_bdy_per_cons_neigh(num_cons_neighbors);
for (int i = 0; i < num_cons_neighbors; i++)
{
num_bdy_per_cons_neigh[i] = dest_B2L_maps[i].size();
}
IVector_h root_cons_neighbors;
int root_num_cons_neighbors = 0;
IVector_h_vector cons_B2L_maps;
this->consolidateB2LmapsOnRoot(root_num_cons_neighbors, cons_B2L_maps, root_cons_neighbors, dest_B2L_maps, cons_neigh_to_part, num_bdy_per_cons_neigh, parts_to_consolidate, num_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_cons_neighbors, comms);
IVector_h halo_ids_offsets(num_neighbors + 1);
IVector_h halo_ids;
int halo_ids_size = 0;
halo_ids_offsets[0] = 0;
for (int i = 0; i < num_neighbors; i++)
{
halo_ids_size += this->cached_L2H_maps[i].size();
halo_ids_offsets[i + 1] = halo_ids_size;
}
halo_ids.resize(halo_ids_size);
// Do exchange with neighbors
// Pack new bdy_ids
IVector_h_vector bdy_ids;
bdy_ids.resize(num_neighbors);
for (int i = 0; i < num_neighbors; i++)
{
int size = this->cached_B2L_maps[i].size();
bdy_ids[i].resize(size);
// Pack buffer
for (int j = 0; j < size; j++)
{
bdy_ids[i][j] = row_ids[this->cached_B2L_maps[i][j]];
}
}
for (int i = 0; i < num_neighbors; i++)
{
comms->send_vector_async(bdy_ids[i], this->neighbors[i], 6666 + this->neighbors[i]);
}
for (int i = 0; i < num_neighbors; i++)
{
comms->recv_vector(halo_ids, this->neighbors[i], 6666 + my_id, halo_ids_offsets[i], this->cached_L2H_maps[i].size());
}
for (int i = 0; i < num_neighbors; i++)
{
comms->send_vector_wait_all(bdy_ids[i]);
}
IVector_h halo_offsets(root_num_cons_neighbors + 1, 0);
int root_num_rows;
this->consolidateAndRenumberHalos(halo_ids, halo_ids_offsets, halo_offsets, this->neighbors, num_neighbors, root_cons_neighbors, root_num_cons_neighbors, destination_part, my_destination_part, is_root_partition, parts_to_consolidate, num_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, root_num_rows, comms);
if (is_root_partition)
{
this->B2L_maps.resize(cons_B2L_maps.size());
for (int i = 0; i < cons_B2L_maps.size(); i++)
{
thrust::sort(cons_B2L_maps[i].begin(), cons_B2L_maps[i].end());
this->B2L_maps[i].copy(cons_B2L_maps[i]); // H2D copy of B2L maps
}
cudaCheckError();
}
// Now renumber the row_ids based on lth_maps
count = 0;
for (int i = 0; i < num_neighbors; i++)
{
for (int j = 0; j < this->cached_L2H_maps[i].size(); j++)
{
row_ids[this->cached_L2H_maps[i][j]] = halo_ids[count];
count++;
}
}
cudaMemcpy(this->m_row_ids_CONS.raw(), row_ids.raw(), (n + total_num_halos)*sizeof(int), cudaMemcpyDefault);
cudaCheckError();
int bsize = block_dimx * block_dimy;
if (is_root_partition)
{
this->A->row_offsets.resize(root_num_rows + 1);
}
void *root_row_ptr = (void *) this->A->row_offsets.raw();
if (useCudaIpc)
{
// ----------------------------------------------------
// 1. cudaIPC to get pointer to root's row_offset array
// ----------------------------------------------------
this->ipcExchangePtr(root_row_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
cudaCheckError();
// -------------------------------------------------------------------
// 2. each rank copy it's row length on root partition using row_ids
// -------------------------------------------------------------------
int cta_size = 128;
int grid_size = min(4096, (n + total_num_halos + cta_size - 1) / cta_size);
zero_copy_row_lengths_ids_offsets<mat_value_type> <<< grid_size, cta_size>>>(this->m_old_row_offsets_CONS.raw(), ((int *) root_row_ptr) /* IPC */, this->m_row_ids_CONS.raw(), n, total_num_halos, (mat_value_type *) diag);
cudaCheckError();
// Root partition waits for children to be done writing their result
this->ipcWaitForChildren(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
cudaCheckError();
}
else // CudaIpcNotAvailable
{
this->checkPinnedBuffer( max( nnz * sizeof(mat_value_type), (n + 1)*max(sizeof(index_type), sizeof(value_type)) ) );
if (!is_root_partition)
{
IVector_h data_to_send(3);
data_to_send[0] = n;
data_to_send[1] = nnz;
data_to_send[2] = total_num_halos;
int dummy;
void *row_ptrs_to_send = this->getHostPointerForData((void *)row_ptrs, sizeof(index_type) * (n + 1), &dummy);
comms->send_vector(data_to_send, my_destination_part, 10000 + my_id);
comms->send_raw_data(row_ptrs_to_send, (n + 1)*sizeof(int), my_destination_part, 10001 + my_id);
comms->send_raw_data(&row_ids[0], (n + total_num_halos)*sizeof(int), my_destination_part, 10002 + my_id);
}
else
{
cudaEvent_t event;
cudaEventCreate(&event);
//TODO: Could use streams here
//TODO: Avoid extra device to host copies
std::vector<IVector_h> data_recv(num_parts_to_consolidate);
for (int i = 0; i < num_parts_to_consolidate; i++)
{
data_recv[i].resize(3);
int current_part = parts_to_consolidate[i];
if (current_part != my_id)
{
comms->recv_vector(data_recv[i], current_part, 10000 + current_part);
}
else
{
data_recv[i][0] = n;
data_recv[i][1] = nnz;
data_recv[i][2] = total_num_halos;
}
}
this->m_child_n.resize(num_parts_to_consolidate);
this->m_child_nnz.resize(num_parts_to_consolidate);
this->m_child_num_halos.resize(num_parts_to_consolidate);
this->m_child_row_ids.resize(num_parts_to_consolidate);
this->m_child_old_row_offsets.resize(num_parts_to_consolidate);
int max_n = 0;
int max_nnz = 0;
for (int i = 0; i < num_parts_to_consolidate; i++)
{
int current_part = parts_to_consolidate[i];
this->m_child_n[i] = data_recv[i][0];
this->m_child_nnz[i] = data_recv[i][1];
this->m_child_num_halos[i] = data_recv[i][2];
if (this->m_child_n[i] > max_n) { max_n = this->m_child_n[i]; }
if (this->m_child_nnz[i] > max_nnz) { max_nnz = this->m_child_nnz[i]; }
this->m_child_row_ids[i].resize(this->m_child_n[i] + this->m_child_num_halos[i]);
this->m_child_old_row_offsets[i].resize(this->m_child_n[i] + 1);
}
this->m_child_max_n = max_n;
this->m_child_max_nnz = max_nnz;
for (int i = 0; i < num_parts_to_consolidate; i++)
{
int current_part = parts_to_consolidate[i];
int cta_size = 128;
int grid_size = min(4096, (this->m_child_n[i] + this->m_child_num_halos[i] + cta_size - 1) / cta_size);
if (current_part != my_id)
{
comms->recv_vector(this->m_child_old_row_offsets[i], current_part, 10001 + current_part, 0, this->m_child_n[i] + 1);
comms->recv_vector(this->m_child_row_ids[i], current_part, 10002 + current_part, 0, this->m_child_n[i] + this->m_child_num_halos[i]);
zero_copy_row_lengths_ids_offsets<mat_value_type> <<< grid_size, cta_size>>>(this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), this->m_child_row_ids[i].raw(), this->m_child_n[i], this->m_child_num_halos[i], (mat_value_type *) diag);
// Wait for kernel to finish before overwriting host buffer
cudaEventRecord(event);
cudaEventSynchronize(event);
}
else
{
zero_copy_row_lengths_ids_offsets<mat_value_type> <<< grid_size, cta_size>>>(this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), this->m_row_ids_CONS.raw(), n, total_num_halos, (mat_value_type *) diag);
cudaEventRecord(event);
cudaEventSynchronize(event);
}
}
cudaCheckError();
cudaEventDestroy(event);
} // If root partition
//TODO: is this necessary
comms->barrier();
}
//3. root does a exclusive_scan
if (is_root_partition)
{
cudaEvent_t event;
cudaEventCreate(&event);
// Populate the halo rows with diagonal, increase the length of the halo rows
thrust::fill(this->A->row_offsets.begin() + halo_offsets[0], this->A->row_offsets.begin() + halo_offsets[root_num_cons_neighbors], 1);
thrust_wrapper::exclusive_scan(this->A->row_offsets.begin(), this->A->row_offsets.end(), this->A->row_offsets.begin());
cudaEventRecord(event);
cudaEventSynchronize(event);
cudaCheckError();
this->A->set_initialized(0);
this->A->delProps(DIAG); // We always insert the diagonal
this->A->delProps(COO); // No COO
this->A->setColsReorderedByColor(false); // Cols not reordered by color
int nnz = this->A->row_offsets[root_num_rows]; // This is a device to host copy
this->A->resize(root_num_rows, root_num_rows, nnz, block_dimx, block_dimy);
this->A->set_num_nz(nnz); // num_nz doesn't include halo rows
//this->A->set_initialized(1);
cudaEventDestroy(event);
}
else
{
this->A->set_initialized(0);
this->A->resize( 0, 0, 0, block_dimx, block_dimy );
this->A->delProps(DIAG); // We always insert the diagonal
this->A->delProps(COO); // No COO
this->A->setColsReorderedByColor(false); // Cols not reordered by color
//this->A->set_initialized(1);
}
if (useCudaIpc)
{
// ----------------------------------------------
// 4. Do ipc consolidation of values and columns
// ----------------------------------------------
// Child partition waits for parent to create row_offsets
this->ipcWaitForRoot(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
void *root_col_ptr = (void *) this->A->col_indices.raw();
void *root_val_ptr = (void *) this->A->values.raw();
this->ipcExchangePtr(root_col_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
this->ipcExchangePtr(root_val_ptr, is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
int cta_size2 = 128;
int grid_size2 = min(4096, (n + cta_size2 - 1) / cta_size2);
ipc_consolidation_upload_matrix<mat_value_type> <<< grid_size2, cta_size2>>>(n, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), ( (int *) root_row_ptr ) /*IPC*/, col_indices_hd, ( (int *) root_col_ptr) /*IPC*/, data_hd, ( (mat_value_type *) root_val_ptr ) /*IPC*/, diag_hd, bsize);
cudaCheckError();
// Root partition waits for children to upload their matrices
this->ipcWaitForChildren(is_root_partition, num_parts_to_consolidate, parts_to_consolidate, my_destination_part, my_id, comms);
cudaCheckError();
// Child partitions close their mem handle (they are done upload data)
if (!is_root_partition)
{
cudaIpcCloseMemHandle(root_row_ptr);
cudaIpcCloseMemHandle(root_val_ptr);
cudaIpcCloseMemHandle(root_col_ptr);
}
}
else // If cudaIpcNotAvailable
{
if (!is_root_partition)
{
int dummy;
void *col_indices_to_send = this->getHostPointerForData((void *)col_indices, sizeof(index_type) * nnz, &dummy);
comms->send_raw_data(col_indices_to_send, nnz * sizeof(int), my_destination_part, 10000 + my_id);
void *data_to_send = this->getHostPointerForData((void *)data, sizeof(mat_value_type) * nnz, &dummy);
comms->send_raw_data(data_to_send, nnz * bsize * sizeof(mat_value_type), my_destination_part, 10001 + my_id);
if (diag != NULL)
{
void *diag_to_send = this->getHostPointerForData((void *)diag, sizeof(mat_value_type) * n, &dummy);
comms->send_raw_data(diag_to_send, n * bsize * sizeof(mat_value_type), my_destination_part, 10002 + my_id);
}
}
else
{
cudaEvent_t event;
cudaEventCreate(&event);
//TODO: Could use streams here
int *child_col_indices;
mat_value_type *child_data;
mat_value_type *child_diag = NULL;
cudaHostAlloc( (void **) &child_col_indices, this->m_child_max_nnz * sizeof(int), cudaHostAllocMapped);
cudaHostAlloc( (void **) &child_data, this->m_child_max_nnz * bsize * sizeof(mat_value_type), cudaHostAllocMapped);
if (diag != NULL)
{
cudaHostAlloc( (void **) &child_diag, (this->m_child_max_n)*bsize * sizeof(mat_value_type), cudaHostAllocMapped);
}
for (int i = 0; i < num_parts_to_consolidate; i++)
{
int current_part = parts_to_consolidate[i];
int cta_size2 = 128;
int grid_size2 = min(4096, (this->m_child_n[i] + cta_size2 - 1) / cta_size2);
if (current_part != my_id)
{
comms->recv_raw_data(child_col_indices, this->m_child_nnz[i]*sizeof(int), current_part, 10000 + current_part);
comms->recv_raw_data(child_data, this->m_child_nnz[i]*bsize * sizeof(mat_value_type), current_part, 10001 + current_part);
if (diag != NULL)
{
comms->recv_raw_data(child_diag, this->m_child_n[i]*bsize * sizeof(mat_value_type), current_part, 10002 + current_part);
}
int *child_col_indices_hd;
mat_value_type *child_data_hd;
mat_value_type *child_diag_hd = NULL;
cudaHostGetDevicePointer(&child_col_indices_hd, child_col_indices, 0);
cudaHostGetDevicePointer(&child_data_hd, child_data, 0);
if (diag != NULL)
{
cudaHostGetDevicePointer(&child_diag_hd, child_diag, 0);
}
ipc_consolidation_upload_matrix<mat_value_type> <<< grid_size2, cta_size2>>>(this->m_child_n[i], this->m_child_row_ids[i].raw(), this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), child_col_indices_hd, this->A->col_indices.raw(), child_data_hd, this->A->values.raw(), child_diag_hd, bsize);
// Wait for kernel to finish before overwriting host buffer
cudaEventRecord(event);
cudaEventSynchronize(event);
}
else
{
ipc_consolidation_upload_matrix<mat_value_type> <<< grid_size2, cta_size2>>>(n, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), col_indices_hd, this->A->col_indices.raw(), data_hd, this->A->values.raw(), diag_hd, bsize);
cudaEventRecord(event);
cudaEventSynchronize(event);
}
}
cudaCheckError();
cudaEventDestroy(event);
cudaFreeHost(child_col_indices);
cudaFreeHost(child_data);
if (diag != NULL)
{
cudaFreeHost(child_diag);
}
} // If root partition
//TODO: is this necessary
comms->barrier();
}
// Store the original fine level communicator
this->m_is_fine_level_root_partition = is_root_partition;
this->m_my_fine_level_destination_part = my_destination_part;
// Create a clone of the original communicator
this->m_fine_level_comms = comms; //this->_comms is the same pointer that this->m_fine_level_comms right now, so we can overwrite this->_comms, but make sure that we release m_fine_level_cons
this->_comms = this->m_fine_level_comms->CloneSubComm(cons_part_to_part, is_root_partition); // this->_comms will be empty comm for non-root partition and new comm for root ranks only if root partition
this->m_fine_level_id = my_id;
if (is_root_partition)
{
int cta_size = 128;
int grid_size3 = min(4096, ( (root_num_rows - halo_offsets[0]) + cta_size - 1) / cta_size);
if (grid_size3 != 0)
{
set_halo_cols_values <<< grid_size3, cta_size>>>(this->A->row_offsets.raw(), this->A->col_indices.raw(), this->A->values.raw(), halo_offsets[0], root_num_rows, bsize);
cudaCheckError();
}
int my_cons_id = part_to_cons_part[my_id];
this->_global_id = my_cons_id;
this->_num_interior_nodes = total_interior_rows_in_merged;
this->_num_boundary_nodes = total_boundary_rows_in_merged;
for (int i = 0; i < root_num_cons_neighbors; i++)
{
root_cons_neighbors[i] = part_to_cons_part[root_cons_neighbors[i]];
}
this->_comms->set_neighbors(root_num_cons_neighbors);
this->neighbors = root_cons_neighbors;
this->halo_offsets = halo_offsets; // H2D copy of halo offsets
this->m_num_fine_level_parts_to_consolidate = num_parts_to_consolidate;
this->m_fine_level_parts_to_consolidate = parts_to_consolidate;
this->set_num_halo_rings(num_rings);
this->set_num_halo_rows(halo_offsets[root_num_cons_neighbors] - halo_offsets[0]);
// B2L_maps has already been copied
this->B2L_rings.resize(root_num_cons_neighbors);
for (int i = 0; i < root_num_cons_neighbors; i++)
{
this->B2L_rings[i].resize(2);
this->B2L_rings[i][0] = 0;
this->B2L_rings[i][1] = cons_B2L_maps[i].size();
}
this->set_initialized(this->A->row_offsets);
this->A->set_initialized(0);
this->A->delProps(DIAG);
this->A->diag.resize(root_num_rows);
this->A->computeDiagonal(); //
this->A->setView(OWNED);
cudaEventCreate(&(this->comm_event));
this->A->set_initialized(1);
}
else
{
this->neighbors.resize(0);
this->halo_offsets.resize(0);
}
/* free memory (if needed) */
if (col_alloc) { cudaFree(col_indices_hd); }
if (data_alloc) { cudaFree(data_hd); }
if (diag_alloc) { cudaFree(diag_hd); }
}
// Replace the numerical values of an already-uploaded (and possibly renumbered)
// fine-level matrix with fresh coefficients, WITHOUT any partition consolidation.
// The sparsity pattern must be unchanged since the original upload.
//
//   n           - row count passed by the caller (row count actually used is
//                 this->halo_offsets[0], the number of owned rows)
//   nnz         - number of non-zeros in data_pinned; may differ from
//                 A->get_num_nz() when the diagonal is stored separately
//                 (see the WARNING comment below)
//   data_pinned - host array of matrix values (pinned or pageable; a device
//                 staging copy is made if it is not device-accessible)
//   diag_pinned - host array of diagonal values, or NULL when the diagonal is
//                 embedded inside data_pinned
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsNoCons(int n, int nnz, const mat_value_type *data_pinned, const mat_value_type *diag_pinned)
{
    //matrix parameters
    //int num_nnz = this->A->get_num_nz();
    // Owned (non-halo) rows come first; halo rows follow them.
    int num_rows = this->halo_offsets[0];
    int total_rows = num_rows + this->num_halo_rows();
    int block_size = this->A->get_block_size();
    mat_value_type *data_hd = NULL;
    mat_value_type *diag_hd = NULL;
    // Non-zero alloc flags mean getDevicePointerForData allocated a staging
    // buffer that must be cudaFree'd at the end of this function.
    int data_alloc = 0;
    int diag_alloc = 0;
    //cuda parameters
    int num_blocks = min(4096, (num_rows + 127) / 128);
    /* WARNING: the number of non-zero elements (nnz) in the array data_pinned and A->values (num_nnz) might be different at this point.
       1. If the matrix has CSR property and therefore diagonal is included in the matrix this values will be the same.
       2. If the matrix has DIAG property and therefore diagonal is originally stored separately, and later appended to the array
          of values, and subsequently inserted into the matrix than num_nnz = nnz + n. We have to account for this fact when replacing the
          coefficients (and use nnz not num_nnz).
       obs.: see calls to computeDiagonal (matrix.cu), AMGX_matrix_upload and AMGX_replace_coefficients (amgx_c.cu), and
             uploadMatrix and replaceMatrixCoefficients[No|With]Cons (distributed_manager.cu) for details. */
    /* check early exit */
    // Nothing to do when there is no renumbering/neighbors, unless the fine
    // level was glued (in which case the glued path still needs the work below).
    if ((this->neighbors.size() == 0 || this->renumbering.size() == 0) && !this->m_is_fine_level_glued)
    {
        return;
    }
    cudaCheckError();
    /* allocate if data and diag if they are not pinned */
    data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data_pinned, nnz * block_size * sizeof(mat_value_type), &data_alloc);
    if (diag_pinned != NULL)
    {
        diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag_pinned, num_rows * block_size * sizeof(mat_value_type), &diag_alloc);
    }
    /* replace the values (reordering them if needed) */
    if (insertDiagonals && diag_pinned != NULL)
    {
        // Diagonal entries are merged into the CSR values array by the kernel.
        replace_values_matrix <32> <<< num_blocks, 512>>>(data_hd, diag_hd, this->old_row_offsets.raw(), this->A->row_offsets.raw(), this->A->values.raw(), this->renumbering.raw(), block_size, num_rows);
    }
    else
    {
        replace_values_matrix <32> <<< num_blocks, 512>>>(data_hd, this->old_row_offsets.raw(), this->A->row_offsets.raw(), this->A->values.raw(), this->renumbering.raw(), block_size, num_rows);
        if (diag_pinned != NULL)
        {
            // Separate diagonal storage: diagonal blocks live past the end of
            // the off-diagonal values, at offset row_offsets[total_rows]*block_size.
            reorder_vector_values <<< num_blocks, 512>>>(this->A->values.raw() + this->A->row_offsets[total_rows]*block_size, diag_hd, this->renumbering.raw(), block_size, num_rows);
        }
    }
    cudaCheckError();
    /* free memory (if needed) */
    if (data_alloc) { cudaFree(data_hd); }
    if (diag_alloc) { cudaFree(diag_hd); }
}
// Replace the numerical values of a fine-level matrix that was uploaded with
// partition CONSOLIDATION (several MPI ranks merged onto one root rank's GPU).
// Three transport paths are supported:
//   1. CUDA IPC        - children write straight into the root's device arrays
//                        through IPC-exchanged pointers;
//   2. no-IPC, "aggregation" consolidation - children ship raw host buffers to
//                        the root, which uploads them via mapped pinned memory;
//   3. no-IPC, "glued" (classical) consolidation - root gathers all children's
//                        values into one host buffer and reuses the NoCons path.
// The sparsity pattern must be unchanged since the original consolidated upload.
//
//   n           - number of owned rows on THIS rank (halo_offsets may be empty
//                 on non-root partitions; see the WARNING below)
//   nnz         - number of non-zeros in data_pinned on this rank
//   data_pinned - host array of matrix values for this rank
//   diag_pinned - host array of diagonal values, or NULL
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsWithCons(int n, int nnz, const mat_value_type *data_pinned, const mat_value_type *diag_pinned)
{
    //matrix parameters
    //int num_nnz = this->A->get_num_nz();
    /* WARNING: in consolidation, for non-root partitions, halo_offsets
       might be NULL due to the call halo_offsets.resize(0); at the end
       of the routine uploadMatrix->consolidateAndUploadAll. We should
       use the parameter n instead this->halo_offsets[0] for num_rows. */
    int num_rows = n;
    int block_size = this->A->get_block_size();
    mat_value_type *data_hd = NULL;
    mat_value_type *diag_hd = NULL;
    // Non-zero alloc flags mean getDevicePointerForData allocated a device
    // staging buffer that must be cudaFree'd at the end of this function.
    int data_alloc = 0;
    int diag_alloc = 0;
    data_hd = (mat_value_type *) this->getDevicePointerForData((void *)data_pinned, nnz * block_size * sizeof(mat_value_type), &data_alloc);
    if (diag_pinned != NULL)
    {
        diag_hd = (mat_value_type *) this->getDevicePointerForData((void *)diag_pinned, num_rows * block_size * sizeof(mat_value_type), &diag_alloc);
    }
    bool useCudaIpc = this->m_use_cuda_ipc_consolidation;
    if (useCudaIpc)
    {
        // ------------------- Path 1: CUDA IPC -------------------
        // Child partitions wait for root to be done
        this->ipcWaitForRoot(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
        cudaCheckError();
        // Root exports its row_offsets/values device pointers; children import
        // them via IPC handles so they can scatter their values directly.
        void *root_row_ptr = (void *) this->A->row_offsets.raw();
        void *root_val_ptr = (void *) this->A->values.raw();
        this->ipcExchangePtr(root_row_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
        this->ipcExchangePtr(root_val_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
        // replace the values, insert the diagonal
        // ncons = row count recorded at consolidated upload time.
        int ncons = this->m_old_row_offsets_CONS.size() - 1;
        int cta_size = 128;
        int grid_size2 = min(4096, (ncons + cta_size - 1) / cta_size);
        ipc_consolidation_replace_values<mat_value_type> <<< grid_size2, cta_size>>>(ncons, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), ( (int *) root_row_ptr )/*IPC*/, data_hd, ( (mat_value_type *) root_val_ptr )/*IPC*/, diag_hd, this->A->get_block_size() );
        cudaCheckError();
        // Root partition wait for child to be done replacing their values
        this->ipcWaitForChildren(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
        cudaCheckError();
        // Children release the imported IPC handles; root keeps its own memory.
        if (!this->m_is_fine_level_root_partition)
        {
            cudaIpcCloseMemHandle(root_row_ptr);
            cudaIpcCloseMemHandle(root_val_ptr);
        }
    }
    else // cudaIpcNotAvailable
    {
        if (this->m_is_fine_level_consolidated) // aggregation
        {
            // ------------------- Path 2: no IPC, aggregation -------------------
            int bsize = this->A->get_block_size();
            int ncons = this->m_old_row_offsets_CONS.size() - 1;
            if (!this->m_is_fine_level_root_partition)
            {
                // Child: ship raw values (tag 10001+id) and optional diagonal
                // (tag 10002+id) to the root partition.
                int dummy;
                int nnzcons = this->m_old_nnz_CONS;
                void *data_to_send = this->getHostPointerForData((void *)data_pinned, nnzcons * bsize * sizeof(mat_value_type), &dummy);
                this->getFineLevelComms()->send_raw_data(data_to_send, nnzcons * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10001 + this->fine_level_id());
                if (diag_pinned != NULL)
                {
                    void *diag_to_send = this->getHostPointerForData((void *)diag_pinned, ncons * bsize * sizeof(mat_value_type), &dummy);
                    this->getFineLevelComms()->send_raw_data(diag_to_send, ncons * bsize * sizeof(mat_value_type), this->m_my_fine_level_destination_part, 10002 + this->fine_level_id());
                }
            }
            else
            {
                // Root: receive each child's values into mapped pinned buffers
                // sized for the largest child, then scatter them on the GPU.
                cudaEvent_t event;
                cudaEventCreate(&event);
                //TODO: Could use streams here
                mat_value_type *child_data;
                mat_value_type *child_diag = NULL;
                cudaHostAlloc( (void **) &child_data, this->m_child_max_nnz * bsize * sizeof(mat_value_type), cudaHostAllocMapped);
                if (diag_pinned != NULL)
                {
                    cudaHostAlloc( (void **) &child_diag, (this->m_child_max_n)*bsize * sizeof(mat_value_type), cudaHostAllocMapped);
                }
                for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
                {
                    int current_part = this->m_fine_level_parts_to_consolidate[i];
                    int cta_size2 = 128;
                    int grid_size2 = min(4096, (this->m_child_n[i] + cta_size2 - 1) / cta_size2);
                    if (current_part != this->fine_level_id())
                    {
                        this->getFineLevelComms()->recv_raw_data(child_data, this->m_child_nnz[i]*bsize * sizeof(mat_value_type), current_part, 10001 + current_part);
                        if (diag_pinned != NULL)
                        {
                            this->getFineLevelComms()->recv_raw_data(child_diag, this->m_child_n[i]*bsize * sizeof(mat_value_type), current_part, 10002 + current_part);
                        }
                        // Zero-copy: kernel reads the child's host buffer through
                        // its mapped device alias.
                        mat_value_type *child_data_hd;
                        mat_value_type *child_diag_hd = NULL;
                        cudaHostGetDevicePointer(&child_data_hd, child_data, 0);
                        if (diag_pinned != NULL)
                        {
                            cudaHostGetDevicePointer(&child_diag_hd, child_diag, 0);
                        }
                        ipc_consolidation_replace_values<mat_value_type> <<< grid_size2, cta_size2>>>(this->m_child_n[i], this->m_child_row_ids[i].raw(), this->m_child_old_row_offsets[i].raw(), this->A->row_offsets.raw(), child_data_hd, this->A->values.raw(), child_diag_hd, bsize);
                        // Wait for kernel to finish before overwriting host buffer
                        cudaEventRecord(event);
                        cudaEventSynchronize(event);
                    }
                    else
                    {
                        // Root's own contribution uses its staged device pointers.
                        ipc_consolidation_replace_values<mat_value_type> <<< grid_size2, cta_size2>>>(ncons, this->m_row_ids_CONS.raw(), this->m_old_row_offsets_CONS.raw(), this->A->row_offsets.raw(), data_hd, this->A->values.raw(), diag_hd, bsize);
                        //cudaEventRecord(event);
                        //cudaEventSynchronize(event);
                    }
                }
                cudaCheckError();
                cudaEventDestroy(event);
                cudaFreeHost(child_data);
                if (diag_pinned != NULL)
                {
                    cudaFreeHost(child_diag);
                }
            } // If root partition
            //TODO: is this necessary
            this->getFineLevelComms()->barrier();
        } //agg
        else if (this->m_is_fine_level_glued) // classical
        {
            // ------------------- Path 3: no IPC, glued (classical) -------------------
            int bsize = this->A->get_block_size();
            int ncons = this->m_old_row_offsets_CONS.size() - 1;
            // All-gather per-partition nnz counts, then prefix-sum them into
            // nnz_off so the root knows each child's offset in the glued buffer.
            IVector_h nnz_off;
            nnz_off.resize(this->getConsolidationArrayOffsets().size());
            IVector_h nnz_array;
            nnz_array.resize(this->getConsolidationArrayOffsets().size() - 1);
            this->getFineLevelComms()->all_gather( nnz,
                                                   nnz_array,
                                                   this->getConsolidationArrayOffsets().size() - 1);
            nnz_off[0] = 0;
            for (int i = 0; i < nnz_array.size(); i++)
            {
                nnz_off[i + 1] = nnz_off[i] + nnz_array[i];
            }
            if (!this->m_is_fine_level_root_partition)
            {
                // Child: send values (tag 10001+id) and optional diagonal
                // (tag 10002+id) to the root.
                int dummy;
                void *data_to_send = this->getHostPointerForData((void *)data_pinned, nnz * bsize * sizeof(mat_value_type), &dummy);
                this->getFineLevelComms()->send_raw_data( data_to_send,
                        nnz * bsize * sizeof(mat_value_type),
                        this->m_my_fine_level_destination_part,
                        10001 + this->fine_level_id());
                if (diag_pinned != NULL)
                {
                    // NOTE(review): the size requested here (n * sizeof) is smaller than
                    // the n * bsize * sizeof actually sent below — verify this is safe
                    // for the non-pinned fallback inside getHostPointerForData.
                    void *diag_to_send = this->getHostPointerForData((void *)diag_pinned, n * sizeof(mat_value_type), &dummy);
                    this->getFineLevelComms()->send_raw_data( diag_to_send,
                            n * bsize * sizeof(mat_value_type),
                            this->m_my_fine_level_destination_part,
                            10002 + this->fine_level_id());
                    //diag.resize(0);
                    cudaCheckError();
                }
                //values.resize(0);
                cudaCheckError();
            }
            else
            {
                //TODO: Could use streams here
                mat_value_type *child_data;
                mat_value_type *child_diag = NULL;
                // Assumes partions have been glued already
                this->A->getNnzForView(OWNED, &nnz);
                cudaHostAlloc( (void **) &child_data, nnz * bsize * sizeof(mat_value_type), cudaHostAllocMapped);
                if (diag_pinned != NULL)
                {
                    cudaHostAlloc( (void **) &child_diag, this->halo_offsets[this->neighbors.size()]*bsize * sizeof(mat_value_type), cudaHostAllocMapped);
                }
                // roots copy their data
                // NOTE(review): these memcpy sizes omit the bsize factor used for the
                // recv paths above — presumably correct only for bsize == 1; confirm.
                memcpy ( &child_data[0], data_pinned, nnz_array[this->fine_level_id()]*sizeof(value_type));
                if (diag_pinned != NULL)
                {
                    memcpy ( &child_diag[0], diag_pinned, n * sizeof(value_type));
                }
                // Receive each other child's slice at its nnz/row offset.
                for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
                {
                    int current_part = this->m_fine_level_parts_to_consolidate[i];
                    int current_offset = nnz_off[current_part] - nnz_off[this->fine_level_id()] ;
                    int current_nnz = nnz_array[current_part];
                    if (current_part != this->fine_level_id())
                    {
                        this->getFineLevelComms()->recv_raw_data( &child_data[current_offset],
                                current_nnz * bsize * sizeof(mat_value_type),
                                current_part,
                                10001 + current_part);
                        if (diag_pinned != NULL)
                            this->getFineLevelComms()->recv_raw_data( &child_diag[this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()]],
                                                                      (this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part])*bsize * sizeof(mat_value_type),
                                                                      current_part,
                                                                      10002 + current_part);
                    }
                }
                cudaCheckError();
                // we can follow the usual upload path for raw data now
                // Assumes partions have been glued already
                int os;
                this->A->getOffsetAndSizeForView(OWNED, &os, &n);
                replaceMatrixCoefficientsNoCons( n, nnz, child_data, child_diag);
                cudaCheckError();
                cudaFreeHost(child_data);
                if (diag_pinned != NULL)
                {
                    cudaFreeHost(child_diag);
                }
            } // If root partition
            //TODO: is this necessary
            this->getFineLevelComms()->barrier();
        } // cla
    } // not ipc
    this->A->setView(OWNED);
    /* free memory (if needed) */
    if (data_alloc) { cudaFree(data_hd); }
    if (diag_alloc) { cudaFree(diag_hd); }
}
// Upload a host vector to the device and put it into the solver's internal
// ordering, dispatching to the consolidation-aware path when the fine level
// was consolidated (or glued but not currently glued). In the plain path the
// data is copied to the device, permuted via the renumbering vector, and its
// halo entries are refreshed from the neighbors.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVector(VVector_v &v, const void *data, int n, int block_dim)
{
    const bool needs_consolidation_path =
        this->isFineLevelConsolidated() || (this->isFineLevelGlued() && !this->isGlued());
    if (needs_consolidation_path)
    {
        transformAndUploadVectorWithCons(v, data, n, block_dim);
        return;
    }
    // Plain path: size the device vector, copy the raw host data over,
    // permute it into the internal row ordering, then exchange halos.
    v.resize(n * block_dim);
    cudaCheckError();
    // Upload on host
    cudaMemcpy(v.raw(), (value_type *)data, n * block_dim * sizeof(value_type), cudaMemcpyDefault);
    cudaCheckError();
    // Permute based on renumbering vector
    transformVector(v);
    // Exchange halos
    int tag = 0;
    this->exchange_halo(v, tag);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVectorWithCons(VVector_v &v, const void *data_pinned, int n, int block_dim)
{
    // Consolidation-aware upload: child partitions forward their slice of the
    // vector to their fine-level root (directly via CUDA IPC when available,
    // otherwise through MPI messages); roots assemble the consolidated vector,
    // reorder it into the solver's row numbering and perform the halo exchange.
    if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }
    bool useCudaIpc = this->m_use_cuda_ipc_consolidation;
    this->getFineLevelComms()->barrier();
    void *root_temp_ptr = NULL;
    VVector_v temp;
    // Roots stage the consolidated vector (owned + halo rows) in 'temp'.
    if (this->m_is_fine_level_root_partition && !this->m_is_fine_level_glued )
    {
        temp.resize(this->halo_offsets[this->neighbors.size()]*v.get_block_size(), types::util<value_type>::get_zero());
        temp.set_block_dimx(v.get_block_dimx());
        temp.set_block_dimy(v.get_block_dimy());
        root_temp_ptr = (void *) temp.raw();
    }
    cudaCheckError();
    int data_alloc = 0;
    value_type *data_hd = NULL;
    if (!this->m_is_fine_level_glued )
    {
        // Device-accessible view of the caller's buffer (may allocate a copy;
        // data_alloc records whether we must free it at the end).
        data_hd = (value_type *) this->getDevicePointerForData((void *)data_pinned, n * block_dim * sizeof(value_type), &data_alloc);
    }
    if (useCudaIpc)
    {
        // Do IPC: children map the root's staging buffer and scatter into it directly.
        this->ipcExchangePtr(root_temp_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
        cudaCheckError();
        int num_blocks = min(4096, (n + 511) / 512);
        reorder_vector_values <<< num_blocks, 512>>>( (value_type *) root_temp_ptr, data_hd, this->m_row_ids_CONS.raw(), v.get_block_size(), n);
        // Root partition waits for children to be done
        this->ipcWaitForChildren(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
        cudaCheckError();
        if (!this->m_is_fine_level_root_partition)
        {
            cudaIpcCloseMemHandle(root_temp_ptr);
        }
    }
    else // If cudaIpcNotAvail
    {
        if (this->m_is_fine_level_consolidated) // aggregation
        {
            // Exchange the vector between root and child
            if (!this->m_is_fine_level_root_partition)
            {
                // Child: announce our row count, then send the raw data.
                IVector_h size(1);
                size[0] = n;
                this->getFineLevelComms()->send_vector(size, this->m_my_fine_level_destination_part, 20000 + this->fine_level_id());
                int dummy;
                void *data_to_send = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy);
                this->getFineLevelComms()->send_raw_data(data_to_send, n * v.get_block_size()*sizeof(value_type), this->m_my_fine_level_destination_part, 20001 + this->fine_level_id());
            }
            else
            {
                // Root: learn every child's row count, then receive and reorder
                // each chunk into the consolidated staging vector.
                cudaEvent_t event;
                cudaEventCreate(&event);
                IVector_h child_n(this->m_num_fine_level_parts_to_consolidate);
                int max_n = 0;
                for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
                {
                    int current_part = this->m_fine_level_parts_to_consolidate[i];
                    if (current_part != this->fine_level_id())
                    {
                        this->getFineLevelComms()->recv_vector(child_n, current_part, 20000 + current_part, i, 1);
                    }
                    else
                    {
                        child_n[i] = n;
                    }
                    if (child_n[i] > max_n) { max_n = child_n[i]; }
                }
                // One mapped-pinned bounce buffer sized for the largest child.
                value_type *child_data;
                cudaHostAlloc( (void **) &child_data, max_n * v.get_block_size()*sizeof(value_type), cudaHostAllocMapped);
                value_type *child_data_hd;
                cudaHostGetDevicePointer(&child_data_hd, child_data, 0);
                for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
                {
                    int current_part = this->m_fine_level_parts_to_consolidate[i];
                    int num_blocks = min(4096, (child_n[i] + 511) / 512);
                    if (current_part != this->fine_level_id())
                    {
                        this->getFineLevelComms()->recv_raw_data(&child_data[0], child_n[i]*v.get_block_size()*sizeof(value_type), current_part, 20001 + current_part);
                        reorder_vector_values <<< num_blocks, 512>>>( (value_type *) root_temp_ptr, child_data_hd, this->m_child_row_ids[i].raw(), v.get_block_size(), child_n[i]);
                        // Wait for the kernel reading the bounce buffer before reusing it.
                        cudaEventRecord(event);
                        cudaEventSynchronize(event);
                        cudaCheckError();
                    }
                    else
                    {
                        reorder_vector_values <<< num_blocks, 512>>>( (value_type *) root_temp_ptr, data_hd, this->m_row_ids_CONS.raw(), v.get_block_size(), n);
                    }
                } // Loop over parts to consolidate
                cudaCheckError();
                cudaEventDestroy(event);
                cudaFreeHost(child_data);
            } // If root partition
        } //agg
        else if (this->m_is_fine_level_glued) // cla
        {
            value_type *child_data = NULL;
            if (!this->m_is_fine_level_root_partition)
            {
                int dummy;
                void *data_to_send = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy);
                this->getFineLevelComms()->send_raw_data( data_to_send,
                                                          n * v.get_block_size()*sizeof(value_type),
                                                          this->m_my_fine_level_destination_part,
                                                          20001 + this->fine_level_id());
                //v.resize(0); // just in case something resized it betwen iterations
                cudaCheckError();
            }
            else
            {
                cudaHostAlloc( (void **) &child_data, this->halo_offsets[this->neighbors.size()]*v.get_block_size()*sizeof(value_type), cudaHostAllocMapped);
                value_type *child_data_hd;
                cudaHostGetDevicePointer(&child_data_hd, child_data, 0);
                // roots copy their data
                int dummy;
                void *my_data = this->getHostPointerForData((void *)data_pinned, n * v.get_block_size() * sizeof(value_type), &dummy);
                // BUGFIX: copy from the host-accessible pointer obtained above.
                // Previously this memcpy read 'data_pinned' directly and left
                // 'my_data' unused; getHostPointerForData exists precisely to
                // provide a pointer that is safe to dereference on the host.
                memcpy ( &child_data[0], my_data, n * v.get_block_size()*sizeof(value_type));
                // Loop over parts to consolidate: each child's slice lands at its
                // offset within the root's consolidated index range.
                for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
                {
                    int current_part = this->m_fine_level_parts_to_consolidate[i];
                    if (current_part != this->fine_level_id())
                    {
                        this->getFineLevelComms()->recv_raw_data( &child_data[this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()]],
                                sizeof(value_type) * (this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part]),
                                current_part,
                                20001 + current_part );
                    }
                }
                // usual path
                // Upload on host
                cudaMemcpy(v.raw(), (value_type *)child_data, v.size()* sizeof(value_type), cudaMemcpyDefault);
                cudaCheckError();
            } // If root partition
            // Permute based on renumbering vector
            transformVector(v);
            cudaCheckError();
            // Exchange halos
            int tag = 0;
            this->exchange_halo(v, tag);
            cudaCheckError();
            v.set_unconsolidated_size(n);
            // free host
            if (child_data) { cudaFreeHost(child_data); }
            cudaCheckError();
        } //cla
    } // If cudaIpcAvailable
    if (!this->m_is_fine_level_glued) // not needed for classcical
    {
        if (this->m_is_fine_level_root_partition)
        {
            v.swap(temp);
            int tag = 0;
            // Root partitions do the exchange
            this->exchange_halo(v, tag);
        }
        v.set_unconsolidated_size(n * v.get_block_size());
        v.set_transformed();
    }
    /* free memory (if needed) */
    if (data_alloc) { cudaFree(data_hd); }
    cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::transformVector(VVector_v &v)
{
    // Permutes v into the solver's internal row ordering (this->renumbering)
    // and grows it to cover halo rows. No-op for single-partition runs.
    if (this->neighbors.size() == 0) { return; }
    else if (this->renumbering.size() == 0)
    {
        // No renumbering was computed: just make room for the halo rows.
        v.resize(this->halo_offsets[this->neighbors.size()]*v.get_block_size());
        return;
    }
    if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }
    if (v.size() < this->halo_offsets[this->neighbors.size()]*v.get_block_size())
    {
        // Input holds only owned rows: reorder into a halo-sized, zero-filled
        // temp and take ownership of it via swap.
        VVector_v temp(this->halo_offsets[this->neighbors.size()]*v.get_block_size(), types::util<value_type>::get_zero());
        temp.set_block_dimx(v.get_block_dimx());
        temp.set_block_dimy(v.get_block_dimy());
        if (v.size() < this->halo_offsets[0]*this->A->get_block_dimx())
        {
            FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED);
        }
        //reorder based on row permutation
        int size = this->halo_offsets[0];
        int num_blocks = min(4096, (size + 511) / 512);
        reorder_vector_values <<< num_blocks, 512>>>(temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size);
        v.swap(temp);
    }
    else
    {
        // Vector is already halo-sized: reorder the owned rows through a
        // temporary and copy them back in place.
        VVector_v temp(this->halo_offsets[0]*v.get_block_size());
        int size = this->halo_offsets[0];
        int num_blocks = min(4096, (size + 511) / 512);
        reorder_vector_values <<< num_blocks, 512>>>(temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size);
        thrust::copy(temp.begin(), temp.end(), v.begin());
    }
    cudaCheckError();
    v.set_transformed();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v)
{
    // In-place inverse of transformVector(): undoes the renumbering and shrinks
    // v back to owned rows only. No-op when nothing was renumbered.
    if (this->neighbors.size() == 0 || this->renumbering.size() == 0) { return; }
    if (v.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }
    VVector_v temp(this->halo_offsets[0]*this->A->get_block_dimx());
    if (v.size() < this->halo_offsets[0]*v.get_block_size())
    {
        FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED);
    }
    //reorder based on row permutation
    int size = this->halo_offsets[0]; // number of owned rows
    int num_blocks = min(4096, (size + 511) / 512);
    inverse_reorder_vector_values <<< num_blocks, 512>>>(temp.raw(), v.raw(), this->renumbering.raw(), v.get_block_size(), size);
    //reorder_vector_values<<<num_blocks, 512>>>(temp.raw(), v.raw(), this->inverse_renumbering.raw(), v.get_block_size(), size);
    cudaCheckError();
    // Shrink to owned rows and copy the reverted values back into v.
    v.resize(this->halo_offsets[0]*this->A->get_block_dimx());
    thrust::copy(temp.begin(), temp.end(), v.begin());
    cudaCheckError();
}
template <class TConfig>
void DistributedManagerBase<TConfig>::computeDestinationPartitions(INDEX_TYPE upper_threshold, float avg_size, const int num_parts, int &new_num_parts, bool &wantNeighbors)
{
    // Decides, for each of the num_parts fine partitions, which destination
    // partition it is consolidated onto, and reports the resulting number of
    // destination partitions in new_num_parts.
    //
    // example wantNeighbors = true -> destination_part = [0 0 0 0 4 4 4 4 8 8 8 8]
    // example wantNeighbors = false -> destination_part = [0 1 2 3 0 1 2 3 0 1 2 3]
    m_destination_partitions.resize(num_parts);
    std::vector<int> dp(num_parts);
    if (avg_size < 1.f) { avg_size = 1.f; } // avoid floating point exception
    int wanted_num_fine_parts_to_consolidate = ( upper_threshold + (int) avg_size - 1) / ( (int) avg_size );
    new_num_parts = (num_parts + wanted_num_fine_parts_to_consolidate - 1) / wanted_num_fine_parts_to_consolidate;
    for (int i = 0; i < num_parts; i++)
    {
        dp[i] = i % new_num_parts;
        // BUGFIX: store the round-robin assignment so the wantNeighbors == false
        // case matches the documented example above; previously that path left
        // m_destination_partitions unset before it was read below.
        m_destination_partitions[i] = dp[i];
    }
    if (wantNeighbors)
    {
        // Group consecutive ranks together: every partition's destination is the
        // lowest-numbered rank of its group.
        std::sort (dp.begin(), dp.end());
        m_destination_partitions[0] = 0;
        for (int i = 1; i < num_parts; i++)
        {
            if (dp[i - 1] < dp[i])
            {
                m_destination_partitions[i] = i;
            }
            else
            {
                m_destination_partitions[i] = m_destination_partitions[i - 1];
            }
        }
    }
    m_my_destination_part = m_destination_partitions[global_id()];
}
template <class TConfig>
void DistributedManagerBase<TConfig>::computeDestinationPartitionsWithCons(int my_id, int num_parts, IVector_h &destination_part, DistributedComms<TConfig> *comms)
{
    // Build a tag uniquely identifying this rank's physical GPU:
    // hostname plus the PCI-E bus and device IDs.
    int device_id = this->A->getResources()->getDevice(0);
    std::string host_base;
    comms->get_hostname(host_base);
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, device_id);
    std::stringstream tag;
    tag << host_base << "_" << props.pciBusID << "_" << props.pciDeviceID;
    std::string my_hostname(tag.str());
    // Collect every rank's tag; our destination is the first rank that shares
    // the same GPU tag.
    std::vector<std::string> hostnames;
    comms->exchange_hostnames(my_hostname, hostnames, num_parts);
    std::vector<std::string>::iterator match = std::find( hostnames.begin(), hostnames.end(), my_hostname );
    int my_destination_part = match - hostnames.begin();
    // Do a gather into destination_part
    comms->all_gather(my_destination_part, destination_part, num_parts);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVector(VVector_v &v_in, const void *data, int n, int block_dimy)
{
    // Consolidated/glued fine levels need the consolidation-aware download.
    if (this->isFineLevelConsolidated() || this->isFineLevelGlued())
    {
        revertAndDownloadVectorWithCons(v_in, data, n, block_dimy);
        return;
    }
    // Guard clauses: refuse degenerate downloads up front.
    if (n == 0)
    {
        FatalError("Cannot download if size = 0", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if (data == NULL)
    {
        FatalError("Cannot download to a NULL pointer", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if (v_in.size() == 0)
    {
        FatalError("Cannot download an empty vector", AMGX_ERR_NOT_IMPLEMENTED);
    }
    // Undo the solver's renumbering, then copy the result to the caller.
    VVector_v reverted;
    revertVector(v_in, reverted);
    cudaMemcpy((value_type *)data, reverted.raw(), n * block_dimy * sizeof(value_type), cudaMemcpyDefault);
    cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v_in, VVector_v &v_out)
{
    // Writes the un-renumbered owned rows of v_in into v_out.
    // Parenthesization below makes the original operator precedence explicit
    // (&& binds tighter than ||): skip when not glued with no neighbors, or
    // when no renumbering was computed.
    if ((!this->isFineLevelGlued() && this->neighbors.size() == 0) || this->renumbering.size() == 0) { return; }
    if (v_in.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }
    int owned_rows = this->halo_offsets[0];
    if (v_in.size() < owned_rows * v_in.get_block_size())
    {
        FatalError("Unknown size of input vector - smaller than the number of rows owned by this partition", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if (v_out.size() != owned_rows * this->A->get_block_dimx())
    {
        v_out.resize(owned_rows * this->A->get_block_dimx());
    }
    // Scatter back through the renumbering permutation.
    int num_blocks = min(4096, (owned_rows + 511) / 512);
    inverse_reorder_vector_values <<< num_blocks, 512>>>(v_out.raw(), v_in.raw(), this->renumbering.raw(), v_in.get_block_size(), owned_rows);
    cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVectorWithCons(VVector_v &v_in, const void *data_pinned, int n, int block_dimy)
{
    // Consolidation-aware download: inverse of transformAndUploadVectorWithCons.
    // Roots un-renumber the consolidated vector and send each child its slice;
    // finally each rank copies its own (unconsolidated) slice into data_pinned.
    if (v_in.get_block_size() != this->A->get_block_dimx()) { printf("Blocksize mismatch!\n"); }
    void *root_v_ptr = NULL;
    int size = v_in.get_unconsolidated_size();
    int num_rows = size / v_in.get_block_size();
    if (this->m_is_fine_level_root_partition)
    {
        root_v_ptr = (void *) v_in.raw();
    }
    // Per-rank output staging buffer, sized to this rank's own slice.
    VVector_v temp;
    temp.set_block_dimx(v_in.get_block_dimx());
    temp.set_block_dimy(v_in.get_block_dimy());
    temp.resize(size);
    bool useCudaIpc = this->m_use_cuda_ipc_consolidation;
    if (useCudaIpc)
    {
        // Do IPC: children map the root's vector and gather their rows directly.
        this->ipcExchangePtr(root_v_ptr, this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
        // Children partition waits for parent to be done updating vector
        this->ipcWaitForRoot(this->m_is_fine_level_root_partition, this->m_num_fine_level_parts_to_consolidate, this->m_fine_level_parts_to_consolidate, this->m_my_fine_level_destination_part, this->fine_level_id(), this->getFineLevelComms());
        cudaCheckError();
        //reorder based on row permutation
        int num_blocks = min(4096, (num_rows + 511) / 512);
        inverse_reorder_vector_values <<< num_blocks, 512>>>( temp.raw(), (value_type *) root_v_ptr, this->m_row_ids_CONS.raw(), v_in.get_block_size(), num_rows);
        cudaCheckError();
        if (!this->m_is_fine_level_root_partition)
        {
            cudaIpcCloseMemHandle(root_v_ptr);
        }
    }
    else
    {
        if (this->m_is_fine_level_consolidated) // aggregation
        {
            if (this->m_is_fine_level_root_partition)
            {
                // Root: first learn every child's row count.
                IVector_h child_n(this->m_num_fine_level_parts_to_consolidate);
                int max_n = 0;
                for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
                {
                    int current_part = this->m_fine_level_parts_to_consolidate[i];
                    if (current_part != this->fine_level_id())
                    {
                        this->getFineLevelComms()->recv_vector(child_n, current_part, 30000 + current_part, i, 1);
                    }
                    else
                    {
                        child_n[i] = num_rows;
                    }
                    if (child_n[i] > max_n) { max_n = child_n[i]; }
                }
                // Resize temp vector
                VVector_v child_temp;;
                child_temp.resize(max_n * v_in.get_block_size());
                // Un-renumber each child's rows and ship them; our own rows go
                // straight into this rank's output staging buffer.
                for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
                {
                    int current_part = this->m_fine_level_parts_to_consolidate[i];
                    // Pack the vector to be sent
                    int num_blocks = min(4096, (child_n[i] + 511) / 512);
                    if (current_part != this->fine_level_id())
                    {
                        inverse_reorder_vector_values <<< num_blocks, 512>>>( child_temp.raw(), (value_type *) root_v_ptr, this->m_child_row_ids[i].raw(), v_in.get_block_size(), child_n[i]);
                        this->getFineLevelComms()->send_vector(child_temp, current_part, 30001 + current_part, 0, child_n[i]*v_in.get_block_size());
                    }
                    else
                    {
                        inverse_reorder_vector_values <<< num_blocks, 512>>>( temp.raw(), (value_type *) root_v_ptr, this->m_row_ids_CONS.raw(), v_in.get_block_size(), child_n[i]);
                    }
                }
                cudaCheckError();
            }
            else
            {
                // Child: announce our row count, then receive the reverted slice.
                IVector_h size(1);
                size[0] = num_rows;
                this->getFineLevelComms()->send_vector(size, this->m_my_fine_level_destination_part, 30000 + this->fine_level_id());
                this->getFineLevelComms()->recv_vector(temp, this->m_my_fine_level_destination_part, 30001 + this->fine_level_id());
            }
        }
        else if (this->m_is_fine_level_glued) // classical
        {
            if (this->m_is_fine_level_root_partition)
            {
                temp.resize(v_in.size());
                revertVector(v_in, temp);
                cudaCheckError();
                // Send each child its contiguous slice of the reverted vector,
                // located by the consolidation offsets.
                for (int i = 0; i < this->m_num_fine_level_parts_to_consolidate; i++)
                {
                    int current_part = this->m_fine_level_parts_to_consolidate[i];
                    if (current_part != this->fine_level_id())
                    {
                        this->getFineLevelComms()->send_vector( temp,
                                                                current_part,
                                                                current_part + 30001,
                                                                this->getConsolidationArrayOffsets()[current_part] - this->getConsolidationArrayOffsets()[this->fine_level_id()],
                                                                this->getConsolidationArrayOffsets()[current_part + 1] - this->getConsolidationArrayOffsets()[current_part] );
                        cudaCheckError();
                    }
                }
            }
            else
            {
                this->getFineLevelComms()->recv_vector(temp, this->m_my_fine_level_destination_part, 30001 + this->fine_level_id());
                cudaCheckError();
            }
            // Keep only this rank's own rows.
            temp.resize(this->getConsolidationArrayOffsets()[this->fine_level_id() + 1] - this->getConsolidationArrayOffsets()[this->fine_level_id()]);
            cudaCheckError();
        }
    }
    // Copy on host
    cudaMemcpy((value_type *)data_pinned, temp.raw(), temp.size() * sizeof(value_type), cudaMemcpyDefault);
    cudaCheckError();
}
// ---------------------------------------------------------------------------
// Host-side specializations of the vector upload/download entry points.
// Distributed solves are only implemented for device matrices, so these fail
// as soon as the manager has neighbors (or unconditionally, where noted).
// ---------------------------------------------------------------------------
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVector(VVector_v &v, const void *data, int n, int block_dim)
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformVector(VVector_v &v)
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::transformAndUploadVectorWithCons(VVector_v &v, const void *data, int n, int block_dim)
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Unconditional: the host download path is never supported.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVector(VVector_v &v, const void *data, int n, int block_dim)
{
    FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v_in, VVector_v &v_out)
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// ---------------------------------------------------------------------------
// Host-side specializations of the remaining distributed entry points
// (vector revert, matrix reordering, loading, coefficient replacement).
// All of these are device-only features; the host variants fail when
// neighbors exist, or unconditionally where the operation never has a
// meaningful single-partition fallback.
// ---------------------------------------------------------------------------
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertVector(VVector_v &v)
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::revertAndDownloadVectorWithCons(VVector_v &v_in, const void *data, int n, int block_dim)
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix()
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::reorder_matrix_owned()
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::obtain_shift_l2g_reordering(index_type n, I64Vector_h &l2g, IVector_h &p, IVector_h &q)
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::unpack_partition(index_type *Bp, index_type *Bc, mat_value_type *Bv)
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Unconditional: matrix generation is device-only.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::generatePoisson7pt(int nx, int ny, int nz, int P, int Q, int R)
{
    FatalError("GeneratePoisson7pt only implemented on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
// Unconditional: distributed matrix loading is device-only.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template <typename t_colIndex>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::loadDistributedMatrix(
    int num_rows, int num_nonzeros, const int block_dimx, const int block_dimy, const int *row_offsets,
    const t_colIndex *col_indices, const mat_value_type *values, int num_ranks, int num_rows_global, const void *diag, const MatrixDistribution &dist)
{
    FatalError("loadDistributedMatrix only implemented on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingB2Lmaps()
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createOneRingHaloRows()
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Unconditional: coefficient replacement is device-only.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsNoCons(int n, int nnz, const mat_value_type *data, const mat_value_type *diag)
{
    FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::replaceMatrixCoefficientsWithCons(int n, int nnz, const mat_value_type *data, const mat_value_type *diag)
{
    FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedManager<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::createRenumbering(IVector &renumbering)
{
    if (this->neighbors.size() > 0)
    {
        FatalError("Distributed solve only supported on devices", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
template < class TConfig >
void DistributedManagerBase<TConfig>::remove_boundary(IVector_h &flagArray, IVector_h &B2L_map, int size)
{
    // Clear the flag of every row listed in the boundary-to-local map.
    int idx = 0;
    while (idx < size)
    {
        flagArray[B2L_map[idx]] = 0;
        ++idx;
    }
}
template < class TConfig >
void DistributedManagerBase<TConfig>::get_unassigned(IVector_h &flagArray, IVector_h &B2L_map, IVector_h &partition_flags, int size, int fa_size/*, int rank*/)
{
    // First-touch marking: for each mapped row not yet flagged (and within
    // range), set both the global flag and the per-partition flag.
    for (int i = 0; i < size; i++)
    {
        int row = B2L_map[i];
        if (row >= fa_size) { continue; }   // out of range: skip
        if (flagArray[row] != 0) { continue; } // already claimed: skip
        flagArray[row] = 1;
        partition_flags[i] = 1;
    }
}
template < class TConfig >
void DistributedManagerBase<TConfig>::set_unassigned(IVector_h &partition_flags, IVector_h &partition_renum, IVector_h &B2L_map, IVector_h &renumbering, int size, int max_element, int renum_size/*, int rank*/)
{
    // Give freshly flagged rows a new number (partition-local number offset by
    // max_element), then rewrite the B2L map through the renumbering.
    for (int i = 0; i < size; i++)
    {
        int row = B2L_map[i];
        if (row >= renum_size) { continue; } // out of range: leave untouched
        if (partition_flags[i] == 1)
        {
            renumbering[row] = max_element + partition_renum[i];
        }
        B2L_map[i] = renumbering[row];
    }
}
/* print manager for target rank to a file or stdout */
// Debug dump of the complete manager state (communication maps, matrix sizes,
// offsets, renumbering, misc.) for rank 'trank' only; other ranks return
// silently. Output goes to stdout when f == NULL, otherwise to a file named
// "<f>_r<rank>_l<level>.mtx". 's' is an arbitrary caller-supplied header line.
template<class TConfig>
void DistributedManagerBase<TConfig>::print(char *f, char *s, int trank)
{
    DistributedManagerBase<TConfig> *m = this;
    int rank = 0;
    int level = 0;
    char filename[1024];
    FILE *fid = NULL;
    int i, j, k, t1, t2;
#ifdef AMGX_WITH_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
    //check target rank
    if (rank == trank)
    {
        //check whether to output to stdout or a file
        if (f == NULL)
        {
            fid = stdout;
        }
        else
        {
            level = m->A->amg_level_index;
#ifdef _WIN32
            _snprintf_s(filename, 1024, 1024, "%s_r%d_l%d.mtx", f, rank, level);
#else
            snprintf(filename, 1024, "%s_r%d_l%d.mtx", f, rank, level);
#endif
            fid = fopen(filename, "w");
        }
        // Finish all pending device work before reading any state.
        cudaDeviceSynchronize();
        cudaCheckError();
        fprintf(fid, "%s\n", s);
        //--- communication info ---
        //compare neighbors
        t1 = m->neighbors.size();
        fprintf(fid, "neighbors %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->neighbors[i];
            fprintf(fid, "%d\n", k);
        }
        //compare B2L_rings
        t1 = B2L_rings.size();
        fprintf(fid, "B2L_rings %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            t2 = m->B2L_rings[i].size();
            fprintf(fid, "B2L_rings-%d [%d]\n", i, t2);
            for (j = 0; j < t2; j++)
            {
                k = m->B2L_rings[i][j];
                fprintf(fid, "%d\n", k);
            }
        }
        //compare B2L_maps
        t1 = B2L_maps.size();
        fprintf(fid, "B2L_maps %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            t2 = m->B2L_maps[i].size();
            fprintf(fid, "B2L_maps-%d [%d]\n", i, t2);
            for (j = 0; j < t2; j++)
            {
                k = m->B2L_maps[i][j];
                fprintf(fid, "%d\n", k);
            }
        }
        //compare L2H_maps
        t1 = L2H_maps.size();
        fprintf(fid, "L2H_maps %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            t2 = m->L2H_maps[i].size();
            fprintf(fid, "L2H_maps-%d [%d]\n", i, t2);
            for (j = 0; j < t2; j++)
            {
                k = m->L2H_maps[i][j];
                fprintf(fid, "%d\n", k);
            }
        }
        //--- matrix info ---
        fprintf(fid, "num_rows_global=%ld\n", num_rows_global);
        fprintf(fid, "_num_rows_interior=%d\n", m->_num_rows_interior);
        fprintf(fid, "_num_rows_owned=%d\n", m->_num_rows_owned);
        fprintf(fid, "_num_rows_full=%d\n", m->_num_rows_full);
        fprintf(fid, "_num_rows_all=%d\n", m->_num_rows_all);
        fprintf(fid, "_num_nz_interior=%d\n", m->_num_nz_interior);
        fprintf(fid, "_num_nz_owned=%d\n", m->_num_nz_owned);
        fprintf(fid, "_num_nz_full=%d\n", m->_num_nz_full);
        fprintf(fid, "_num_nz_all=%d\n", m->_num_nz_all);
        //compare # halo rows and halo offsets
        fprintf(fid, "# halo rings %d and rows %d\n", m->num_halo_rings(), m->num_halo_rows());
        t1 = m->halo_offsets.size();
        fprintf(fid, "halo_offsets %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->halo_offsets[i];
            fprintf(fid, "%d\n", k);
        }
        //compare halo ranges
        // NOTE(review): halo_ranges entries are narrowed into 'int k' here --
        // confirm they always fit in 32 bits for large global problems.
        t1 = m->halo_ranges.size();
        fprintf(fid, "halo_ranges %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->halo_ranges[i];
            fprintf(fid, "%d\n", k);
        }
        //compare halo ranges (host)
        t1 = m->halo_ranges_h.size();
        fprintf(fid, "halo_ranges_h %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->halo_ranges_h[i];
            fprintf(fid, "%d\n", k);
        }
        //compare part offsets
        t1 = m->part_offsets.size();
        fprintf(fid, "part_offsets %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->part_offsets[i];
            fprintf(fid, "%d\n", k);
        }
        //compare part offsets (host)
        t1 = m->part_offsets_h.size();
        fprintf(fid, "part_offsets_h %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->part_offsets_h[i];
            fprintf(fid, "%d\n", k);
        }
        //compare interior row list
        t1 = m->interior_rows_list.size();
        fprintf(fid, "interior_rows_list %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->interior_rows_list[i];
            fprintf(fid, "%d\n", k);
        }
        //compare boundary row list
        t1 = m->boundary_rows_list.size();
        fprintf(fid, "boundary_rows_list %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->boundary_rows_list[i];
            fprintf(fid, "%d\n", k);
        }
        //compare halo1 row list
        t1 = m->halo1_rows_list.size();
        fprintf(fid, "halo1_rows_list %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->halo1_rows_list[i];
            fprintf(fid, "%d\n", k);
        }
        fprintf(fid, "pointers halo_rows=%p and halo_btl=%p\n", m->halo_rows, m->halo_btl);
        //--- packing info ---
        //compare local to global map
        t1 = m->local_to_global_map.size();
        fprintf(fid, "local_to_global_map %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->local_to_global_map[i];
            fprintf(fid, "%d\n", k);
        }
        //compare renumbering
        t1 = m->renumbering.size();
        fprintf(fid, "renumbering %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->renumbering[i];
            fprintf(fid, "%d\n", k);
        }
        //compare inverse renumbering
        t1 = m->inverse_renumbering.size();
        fprintf(fid, "inverse_renumbering %d\n", t1);
        for (i = 0; i < t1; i++)
        {
            k = m->inverse_renumbering[i];
            fprintf(fid, "%d\n", k);
        }
        //--- GPU related and miscellaneous info
        //streams
        fprintf(fid, "streams i=%p, b=%p\n", m->get_int_stream(), m->get_bdy_stream());
        //miscellaneous info
        int64_t bi = m->base_index(); //inlined function
        int np = m->get_num_partitions(); //inlined function
        int rp = (int)m->isRootPartition(); //cast from boolean to int
        fprintf(fid, "gid=%d,bi=%ld,np=%d,rp=%d,ir=%d,in=%d,bn=%d\n", m->global_id(), bi, np, rp, m->index_range(), m->num_interior_nodes(), m->num_boundary_nodes());
        cudaDeviceSynchronize();
        // Clear any outstanding CUDA error; return value intentionally ignored.
        cudaGetLastError();
        if (fid != stdout)
        {
            fclose(fid);
        }
    }
}
/* print manager for target rank to a file or stdout (for all ranks) */
template<class TConfig>
void DistributedManagerBase<TConfig>::printToFile(char *f, char *s)
{
    // Each rank targets itself, so print() emits output on every rank.
    int my_rank = 0;
#ifdef AMGX_WITH_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
#endif
    this->print(f, s, my_rank);
}
/* compare two managers */
// Returns 0 when equal; otherwise a code (1-8) identifying the first field
// that differs: 1/2 neighbors, 3/4 B2L_rings, 5/6 B2L_maps, 7/8 L2H_maps
// (odd = size mismatch, even = element mismatch).
template<class TConfig>
int DistributedManagerBase<TConfig>::compare(DistributedManagerBase<TConfig> *m2)
{
    DistributedManagerBase<TConfig> *m1 = this;
    // neighbors
    if ((int)m1->neighbors.size() != (int)m2->neighbors.size()) { return 1; }
    int num_neigh = m1->neighbors.size();
    for (int i = 0; i < num_neigh; i++)
    {
        if (m1->neighbors[i] != m2->neighbors[i]) { return 2; }
    }
    // B2L_rings, one entry per neighbor
    for (int i = 0; i < num_neigh; i++)
    {
        if ((int)m1->B2L_rings[i].size() != (int)m2->B2L_rings[i].size()) { return 3; }
        int ring_len = m1->B2L_rings[i].size();
        for (int j = 0; j < ring_len; j++)
        {
            if (m1->B2L_rings[i][j] != m2->B2L_rings[i][j]) { return 4; }
        }
    }
    // B2L_maps (element-wise vector comparison)
    if ((int)m1->B2L_maps.size() != (int)m2->B2L_maps.size()) { return 5; }
    for (int i = 0; i < (int)m1->B2L_maps.size(); i++)
    {
        if (m1->B2L_maps[i] != m2->B2L_maps[i]) { return 6; }
    }
    // L2H_maps (element-wise vector comparison)
    if ((int)m1->L2H_maps.size() != (int)m2->L2H_maps.size()) { return 7; }
    for (int i = 0; i < (int)m1->L2H_maps.size(); i++)
    {
        if (m1->L2H_maps[i] != m2->L2H_maps[i]) { return 8; }
    }
    return 0;
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
DistributedManager< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::~DistributedManager< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >()
{
}
// Out-of-line destructor for the host specialization of DistributedManager.
// Body is intentionally empty: no manual cleanup is performed here.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
DistributedManager< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::~DistributedManager< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >()
{
}
// Host-vector overload: thin dispatch that delegates to the shared
// consB2Lmaps implementation with identical arguments.
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateB2Lmaps(IVector_h_vector &dest_coarse_B2L_maps, IVector_h_vector &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors)
{
consB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors);
}
// Device-vector overload: thin dispatch that delegates to the shared
// consB2Lmaps implementation with identical arguments.
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateB2Lmaps(IVector_d_vector &dest_coarse_B2L_maps, IVector_d_vector &coarse_B2L_maps, IVector_h &fine_neigh_to_coarse_neigh, int num_coarse_neighbors, int num_fine_neighbors)
{
consB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors);
}
// Host-vector overload: thin dispatch that forwards every argument to the
// shared consB2LmapsOnRoot implementation.
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateB2LmapsOnRoot(int &num_consolidated_neighbors, IVector_h_vector &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, IVector_h_vector &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms)
{
consB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_ids, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, comms);
}
// Device-vector overload: thin dispatch that forwards every argument to the
// shared consB2LmapsOnRoot implementation.
template <class TConfig>
void DistributedManagerBase<TConfig>::consolidateB2LmapsOnRoot(int &num_consolidated_neighbors, IVector_d_vector &consolidated_B2L_maps, IVector_h &consolidated_coarse_ids, IVector_d_vector &dest_coarse_B2L_maps, IVector_h &coarse_neigh_to_fine_part, IVector_h &num_bdy_per_coarse_neigh, IVector_h &fine_parts_to_consolidate, int num_fine_parts_to_consolidate, int my_id, int my_destination_part, bool is_root_partition, int num_coarse_neighbors, DistributedComms<TConfig> *comms)
{
consB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_ids, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, comms);
}
/****************************************
 * Explicit instantiations
 ***************************************/
// instantiate DistributedManager for every configured real and complex build mode
#define AMGX_CASE_LINE(CASE) template class DistributedManager<TemplateMode<CASE>::Type >;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// instantiate loadDistributedMatrix for both 32-bit (int) and 64-bit (int64_t)
// column-index variants
#define AMGX_CASE_LINE(CASE) template void DistributedManager<TemplateMode<CASE>::Type>::loadDistributedMatrix( \
int, int, const int, const int, const int*, const int *col_indices, const mat_value_type*, int, int, const void*, const MatrixDistribution &dist); \
template void DistributedManager<TemplateMode<CASE>::Type>::loadDistributedMatrix( \
int, int, const int, const int, const int*, const int64_t *col_indices, const mat_value_type*, int, int, const void*, const MatrixDistribution &dist);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// instantiate the shared base class for all build modes
#define AMGX_CASE_LINE(CASE) template class DistributedManagerBase<TemplateMode<CASE>::Type >;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
0508d711b165a9331114b3b678e8220b19c0da37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <vector>
#include <helper_cuda.h>
#include <sys/time.h>
#include <library_daq.h>
// CUDA = Computer Device Unified Architecture
__global__ void kernel_correct_times(unsigned int *ct);
//
// main code
//
// Entry point for the GPU trigger search. Pipeline: read PMT positions and
// detector geometry, lay out a grid of test vertices, precompute per
// (vertex, PMT) times of flight, read the hit input, then on the GPU
// histogram time-of-flight-corrected hit times per vertex
// (kernel_correct_times) and, per time bin, pick the vertex with the most
// hits (kernel_find_vertex_with_max_npmts_in_timebin). Bins above threshold
// become trigger candidates, which are then coalesced and split into gates.
// Globals (start/stop events, detector/vertex/hit arrays, thread setup) are
// presumably declared in library_daq.h -- TODO confirm.
// NOTE(review): returns 1 on success and 0 on failure, the opposite of the
// usual process exit-code convention; confirm callers expect this.
int main(int argc, const char **argv)
{
/////////////////////
// initialise card //
/////////////////////
findCudaDevice(argc, argv);
// initialise CUDA timing
bool use_timing = true;
if( use_timing ){
hipEventCreate(&start);
hipEventCreate(&stop);
}
hipEventCreate(&total_start);
hipEventCreate(&total_stop);
float elapsed_parameters, elapsed_pmts, elapsed_detector, elapsed_vertices,
elapsed_threads, elapsed_tof, elapsed_memory_tofs_dev, elapsed_tofs_copy_dev,
elapsed_input, elapsed_memory_dev, elapsed_copy_dev, elapsed_kernel,
elapsed_threads_candidates, elapsed_candidates_memory_dev, elapsed_candidates_kernel,
elapsed_candidates_copy_host, elapsed_coalesce, elapsed_gates, elapsed_free, elapsed_total,
elapsed_tofs_free, elapsed_reset;
bool use_verbose = true;
////////////////////
// inspect device //
////////////////////
// set: max_n_threads_per_block, max_n_blocks
print_gpu_properties();
///////////////////////
// define parameters //
///////////////////////
if( use_timing )
start_c_clock();
distance_between_vertices = 500.; // cm
time_step_size = 10; // ns
threshold_number_of_pmts = 45;
coalesce_time = 500.; // ns
trigger_gate_up = 950.0; // ns
trigger_gate_down = -400.0 -200; // ns
output_txt = false;
if( use_verbose ){
printf(" --- user parameters \n");
printf(" distance between test vertices = %f cm \n", distance_between_vertices);
printf(" time step size = %d ns \n", time_step_size);
printf(" threshold_number_of_pmts = %d \n", threshold_number_of_pmts);
printf(" coalesce_time = %f ns \n", coalesce_time);
printf(" trigger_gate_up = %f ns \n", trigger_gate_up);
printf(" trigger_gate_down = %f ns \n", trigger_gate_down);
}
if( use_timing )
elapsed_parameters = stop_c_clock();
////////////////
// read PMTs //
////////////////
// set: n_PMTs, PMT_x, PMT_y, PMT_z
if( use_timing )
start_c_clock();
if( !read_the_pmts() ) return 0;
if( use_timing )
elapsed_pmts = stop_c_clock();
/////////////////////
// read detector ////
/////////////////////
// set: detector_height, detector_radius, pmt_radius
if( use_timing )
start_c_clock();
if( !read_the_detector() ) return 0;
if( use_timing )
elapsed_detector = stop_c_clock();
////////////////////////
// make test vertices //
////////////////////////
// set: n_test_vertices, vertex_x, vertex_y, vertex_z
// use: detector_height, detector_radius
if( use_timing )
start_c_clock();
make_test_vertices();
if( use_timing )
elapsed_vertices = stop_c_clock();
//////////////////////////////
// table of times_of_flight //
//////////////////////////////
// set: host_times_of_flight, time_offset
// use: n_test_vertices, vertex_x, vertex_y, vertex_z, n_PMTs, PMT_x, PMT_y, PMT_z
if( use_timing )
start_c_clock();
make_table_of_tofs();
if( use_timing )
elapsed_tof = stop_c_clock();
if( use_timing )
start_cuda_clock();
allocate_tofs_memory_on_device();
if( use_timing )
elapsed_memory_tofs_dev = stop_cuda_clock();
if( use_timing )
start_cuda_clock();
fill_tofs_memory_on_device();
if( use_timing )
elapsed_tofs_copy_dev = stop_cuda_clock();
////////////////
// read input //
////////////////
// set: n_hits, host_ids, host_times, time_offset, n_time_bins
// use: time_offset, n_test_vertices
if( use_timing )
start_c_clock();
if( !read_the_input() ) return 0;
if( use_timing )
elapsed_input = stop_c_clock();
allocate_candidates_memory_on_host();
////////////////////////////////////////////////
// set number of blocks and threads per block //
////////////////////////////////////////////////
// set: number_of_kernel_blocks, number_of_threads_per_block
// use: n_test_vertices
if( use_timing )
start_c_clock();
if( !setup_threads_for_tof_biparallel() ) return 0;
if( use_timing )
elapsed_threads = stop_c_clock();
start_total_cuda_clock();
///////////////////////////////
// allocate memory on device //
///////////////////////////////
if( use_timing )
start_cuda_clock();
allocate_correct_memory_on_device();
if( use_timing )
elapsed_memory_dev = stop_cuda_clock();
//////////////////////////////////////
// copy input into device variables //
//////////////////////////////////////
if( use_timing )
start_cuda_clock();
fill_correct_memory_on_device();
if( use_timing )
elapsed_copy_dev = stop_cuda_clock();
////////////////////
// execute kernel //
////////////////////
if( use_timing )
start_cuda_clock();
printf(" --- execute kernel \n");
// one thread per (test vertex, hit) pair; fills the per-vertex time-bin histogram
hipLaunchKernelGGL((
kernel_correct_times), dim3(number_of_kernel_blocks),dim3(number_of_threads_per_block), 0, 0, device_n_pmts_per_time_bin);
getLastCudaError("correct_kernel execution failed\n");
if( use_timing )
elapsed_kernel = stop_cuda_clock();
/////////////////////////////////////
// find candidates above threshold //
/////////////////////////////////////
if( use_timing )
start_c_clock();
if( !setup_threads_to_find_candidates() ) return 0;
if( use_timing )
elapsed_threads_candidates = stop_c_clock();
if( use_timing )
start_cuda_clock();
allocate_candidates_memory_on_device();
if( use_timing )
elapsed_candidates_memory_dev = stop_cuda_clock();
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- execute candidates kernel \n");
hipLaunchKernelGGL((
kernel_find_vertex_with_max_npmts_in_timebin), dim3(number_of_kernel_blocks),dim3(number_of_threads_per_block), 0, 0, device_n_pmts_per_time_bin, device_max_number_of_pmts_in_time_bin, device_vertex_with_max_n_pmts);
getLastCudaError("candidates_kernel execution failed\n");
if( use_timing )
elapsed_candidates_kernel = stop_cuda_clock();
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- copy candidates from device to host \n");
checkCudaErrors(hipMemcpy(host_max_number_of_pmts_in_time_bin,
device_max_number_of_pmts_in_time_bin,
n_time_bins*sizeof(unsigned int),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(host_vertex_with_max_n_pmts,
device_vertex_with_max_n_pmts,
n_time_bins*sizeof(unsigned int),
hipMemcpyDeviceToHost));
if( use_timing )
elapsed_candidates_copy_host = stop_cuda_clock();
for(unsigned int time_bin = 0; time_bin<n_time_bins - 1; time_bin++){ // loop over time bins
// n_time_bins - 1 as we are checking the i and i+1 at the same time
if(host_max_number_of_pmts_in_time_bin[time_bin] > threshold_number_of_pmts) {
if( use_verbose )
printf(" time %f vertex (%f, %f, %f) npmts %d \n", (time_bin + 2)*time_step_size - time_offset, vertex_x[host_vertex_with_max_n_pmts[time_bin]], vertex_y[host_vertex_with_max_n_pmts[time_bin]], vertex_z[host_vertex_with_max_n_pmts[time_bin]], host_max_number_of_pmts_in_time_bin[time_bin]);
candidate_trigger_pair_vertex_time.push_back(std::make_pair(host_vertex_with_max_n_pmts[time_bin],time_bin+2));
candidate_trigger_npmts_in_time_bin.push_back(host_max_number_of_pmts_in_time_bin[time_bin]);
}
}
if( use_verbose )
// fix: std::vector::size() returns size_t, which %d cannot portably print
// (undefined behaviour on LP64); cast explicitly for the %d conversion
printf(" n candidates: %d \n", (int)candidate_trigger_pair_vertex_time.size());
///////////////////////
// coalesce triggers //
///////////////////////
if( use_timing )
start_cuda_clock();
coalesce_triggers();
if( use_timing )
elapsed_coalesce = stop_cuda_clock();
//////////////////////////////////
// separate triggers into gates //
//////////////////////////////////
if( use_timing )
start_cuda_clock();
separate_triggers_into_gates();
if( use_timing )
elapsed_gates = stop_cuda_clock();
// deallocate all memory
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- deallocate memory \n");
free_event_memories();
if( use_timing )
elapsed_free = stop_cuda_clock();
elapsed_total = stop_total_cuda_clock();
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- deallocate tofs memory \n");
free_global_memories();
if( use_timing )
elapsed_tofs_free = stop_cuda_clock();
// CUDA exit -- needed to flush the buffer which holds printf from each thread
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- reset device \n");
// NOTE(review): the reset is commented out, so elapsed_reset below times an
// empty section
// hipDeviceReset();
if( use_timing )
elapsed_reset = stop_cuda_clock();
if( use_timing ){
printf(" user parameters time : %f ms \n", elapsed_parameters);
printf(" read pmts execution time : %f ms \n", elapsed_pmts);
printf(" read detector execution time : %f ms \n", elapsed_detector);
printf(" make test vertices execution time : %f ms \n", elapsed_vertices);
printf(" setup threads execution time : %f ms \n", elapsed_threads);
printf(" setup threads candidates execution time : %f ms \n", elapsed_threads_candidates);
printf(" make table of tofs execution time : %f ms \n", elapsed_tof);
printf(" read input execution time : %f ms \n", elapsed_input);
printf(" allocate tofs memory on device execution time : %f ms \n", elapsed_memory_tofs_dev);
printf(" fill tofs memory on device execution time : %f ms \n", elapsed_tofs_copy_dev);
printf(" deallocate tofs memory execution time : %f ms \n", elapsed_tofs_free);
printf(" device reset execution time : %f ms \n", elapsed_reset);
printf(" allocate memory on device execution time : %f ms (%f) \n", elapsed_memory_dev, elapsed_memory_dev/elapsed_total);
printf(" fill memory on device execution time : %f ms (%f) \n", elapsed_copy_dev, elapsed_copy_dev/elapsed_total);
printf(" correct kernel execution time : %f ms (%f) \n", elapsed_kernel, elapsed_kernel/elapsed_total);
printf(" allocate candidates memory on device execution time : %f ms (%f) \n", elapsed_candidates_memory_dev, elapsed_candidates_memory_dev/elapsed_total);
printf(" copy candidates to host execution time : %f ms (%f) \n", elapsed_candidates_copy_host, elapsed_candidates_copy_host/elapsed_total);
printf(" candidates kernel execution time : %f ms (%f) \n", elapsed_candidates_kernel, elapsed_candidates_kernel/elapsed_total);
printf(" coalesce triggers execution time : %f ms (%f) \n", elapsed_coalesce, elapsed_coalesce/elapsed_total);
printf(" separate triggers into gates execution time : %f ms (%f) \n", elapsed_gates, elapsed_gates/elapsed_total);
printf(" deallocate memory execution time : %f ms (%f) \n", elapsed_free, elapsed_free/elapsed_total);
}
printf(" total execution time : %f ms \n", elapsed_total);
return 1;
}
//
// kernel routine
//
// __global__ identifier says it's a kernel function
// Histogram kernel: one thread per (test vertex, hit) pair.
// The thread reads the hit time, subtracts the precomputed time of flight
// from its test vertex to the hit's PMT, converts the corrected time into a
// time-bin index and atomically increments that vertex's bin in ct
// (layout: ct[vertex_index * constant_n_time_bins + bin]).
// Hit times, PMT ids and the time-of-flight table are read through the 1D
// textures tex_times, tex_ids and tex_times_of_flight.
__global__ void kernel_correct_times(unsigned int *ct){
// get unique id for each thread in each block
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
// printf(" tid %d \n", tid);
// tid runs from 0 to n_test_vertices * n_hits:
// vertex 0 vertex 1 ... vertex m
// (hit 0, ..., hit n; hit 0, ..., hit n; ...; hit 0, ..., hit n);
unsigned int vertex_index = (int)(tid/constant_n_hits);
unsigned int hit_index = tid % constant_n_hits;
// printf( " threadi %d blockdim %d blockid %d, tid %d, vertex_index %d, hit %d \n",
// threadIdx.x, blockDim.x, blockIdx.x, tid,
// vertex_index, hit_index);
// skip if thread is assigned to nonexistent vertex
// (the launch grid may overshoot n_test_vertices * n_hits)
if( vertex_index >= constant_n_test_vertices ) return;
// skip if thread is assigned to nonexistent hit
// NOTE(review): hit_index = tid % constant_n_hits is always < constant_n_hits
// when constant_n_hits > 0, so this guard appears to be dead code
if( hit_index >= constant_n_hits ) return;
// start of this vertex's row in the ct histogram
unsigned int vertex_block = constant_n_time_bins*vertex_index;
// start of this vertex's row in the times-of-flight table
unsigned int vertex_block2 = constant_n_PMTs*vertex_index;
// bin = floor((hit_time - tof(vertex, pmt) + time_offset) / time_step_size);
// the "- 1" suggests PMT ids are 1-based -- TODO confirm against input format
atomicAdd(&
ct[
vertex_block
+ int(floor(
(tex1Dfetch(tex_times,hit_index)
- tex1Dfetch(tex_times_of_flight,
vertex_block2
+ tex1Dfetch(tex_ids,hit_index) - 1
)
+ constant_time_offset)/constant_time_step_size
)
)
],1);
// printf( " hit %d (nh %d) id %d t %d; vertex %d (nv %d) tof %f %d \n", hit_index, constant_n_hits, ids[hit_index], t[hit_index], vertex_index, constant_n_test_vertices, tof, ct[time_index]);
return;
}
| 0508d711b165a9331114b3b678e8220b19c0da37.cu | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <vector>
#include <helper_cuda.h>
#include <sys/time.h>
#include <library_daq.h>
// CUDA = Computer Device Unified Architecture
__global__ void kernel_correct_times(unsigned int *ct);
//
// main code
//
// Entry point for the GPU trigger search (CUDA variant of the HIP version of
// this file). Pipeline: read PMT positions and detector geometry, lay out a
// grid of test vertices, precompute per (vertex, PMT) times of flight, read
// the hit input, then histogram time-of-flight-corrected hit times per
// vertex on the GPU and pick, per time bin, the vertex with the most hits.
// Bins above threshold become trigger candidates, which are coalesced and
// split into gates. Globals are presumably declared in library_daq.h --
// TODO confirm.
// NOTE(review): returns 1 on success and 0 on failure, the opposite of the
// usual process exit-code convention; confirm callers expect this.
int main(int argc, const char **argv)
{
/////////////////////
// initialise card //
/////////////////////
findCudaDevice(argc, argv);
// initialise CUDA timing
bool use_timing = true;
if( use_timing ){
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
cudaEventCreate(&total_start);
cudaEventCreate(&total_stop);
float elapsed_parameters, elapsed_pmts, elapsed_detector, elapsed_vertices,
elapsed_threads, elapsed_tof, elapsed_memory_tofs_dev, elapsed_tofs_copy_dev,
elapsed_input, elapsed_memory_dev, elapsed_copy_dev, elapsed_kernel,
elapsed_threads_candidates, elapsed_candidates_memory_dev, elapsed_candidates_kernel,
elapsed_candidates_copy_host, elapsed_coalesce, elapsed_gates, elapsed_free, elapsed_total,
elapsed_tofs_free, elapsed_reset;
bool use_verbose = true;
////////////////////
// inspect device //
////////////////////
// set: max_n_threads_per_block, max_n_blocks
print_gpu_properties();
///////////////////////
// define parameters //
///////////////////////
if( use_timing )
start_c_clock();
distance_between_vertices = 500.; // cm
time_step_size = 10; // ns
threshold_number_of_pmts = 45;
coalesce_time = 500.; // ns
trigger_gate_up = 950.0; // ns
trigger_gate_down = -400.0 -200; // ns
output_txt = false;
if( use_verbose ){
printf(" --- user parameters \n");
printf(" distance between test vertices = %f cm \n", distance_between_vertices);
printf(" time step size = %d ns \n", time_step_size);
printf(" threshold_number_of_pmts = %d \n", threshold_number_of_pmts);
printf(" coalesce_time = %f ns \n", coalesce_time);
printf(" trigger_gate_up = %f ns \n", trigger_gate_up);
printf(" trigger_gate_down = %f ns \n", trigger_gate_down);
}
if( use_timing )
elapsed_parameters = stop_c_clock();
////////////////
// read PMTs //
////////////////
// set: n_PMTs, PMT_x, PMT_y, PMT_z
if( use_timing )
start_c_clock();
if( !read_the_pmts() ) return 0;
if( use_timing )
elapsed_pmts = stop_c_clock();
/////////////////////
// read detector ////
/////////////////////
// set: detector_height, detector_radius, pmt_radius
if( use_timing )
start_c_clock();
if( !read_the_detector() ) return 0;
if( use_timing )
elapsed_detector = stop_c_clock();
////////////////////////
// make test vertices //
////////////////////////
// set: n_test_vertices, vertex_x, vertex_y, vertex_z
// use: detector_height, detector_radius
if( use_timing )
start_c_clock();
make_test_vertices();
if( use_timing )
elapsed_vertices = stop_c_clock();
//////////////////////////////
// table of times_of_flight //
//////////////////////////////
// set: host_times_of_flight, time_offset
// use: n_test_vertices, vertex_x, vertex_y, vertex_z, n_PMTs, PMT_x, PMT_y, PMT_z
if( use_timing )
start_c_clock();
make_table_of_tofs();
if( use_timing )
elapsed_tof = stop_c_clock();
if( use_timing )
start_cuda_clock();
allocate_tofs_memory_on_device();
if( use_timing )
elapsed_memory_tofs_dev = stop_cuda_clock();
if( use_timing )
start_cuda_clock();
fill_tofs_memory_on_device();
if( use_timing )
elapsed_tofs_copy_dev = stop_cuda_clock();
////////////////
// read input //
////////////////
// set: n_hits, host_ids, host_times, time_offset, n_time_bins
// use: time_offset, n_test_vertices
if( use_timing )
start_c_clock();
if( !read_the_input() ) return 0;
if( use_timing )
elapsed_input = stop_c_clock();
allocate_candidates_memory_on_host();
////////////////////////////////////////////////
// set number of blocks and threads per block //
////////////////////////////////////////////////
// set: number_of_kernel_blocks, number_of_threads_per_block
// use: n_test_vertices
if( use_timing )
start_c_clock();
if( !setup_threads_for_tof_biparallel() ) return 0;
if( use_timing )
elapsed_threads = stop_c_clock();
start_total_cuda_clock();
///////////////////////////////
// allocate memory on device //
///////////////////////////////
if( use_timing )
start_cuda_clock();
allocate_correct_memory_on_device();
if( use_timing )
elapsed_memory_dev = stop_cuda_clock();
//////////////////////////////////////
// copy input into device variables //
//////////////////////////////////////
if( use_timing )
start_cuda_clock();
fill_correct_memory_on_device();
if( use_timing )
elapsed_copy_dev = stop_cuda_clock();
////////////////////
// execute kernel //
////////////////////
if( use_timing )
start_cuda_clock();
printf(" --- execute kernel \n");
// one thread per (test vertex, hit) pair; fills the per-vertex time-bin histogram
kernel_correct_times<<<number_of_kernel_blocks,number_of_threads_per_block>>>(device_n_pmts_per_time_bin);
getLastCudaError("correct_kernel execution failed\n");
if( use_timing )
elapsed_kernel = stop_cuda_clock();
/////////////////////////////////////
// find candidates above threshold //
/////////////////////////////////////
if( use_timing )
start_c_clock();
if( !setup_threads_to_find_candidates() ) return 0;
if( use_timing )
elapsed_threads_candidates = stop_c_clock();
if( use_timing )
start_cuda_clock();
allocate_candidates_memory_on_device();
if( use_timing )
elapsed_candidates_memory_dev = stop_cuda_clock();
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- execute candidates kernel \n");
kernel_find_vertex_with_max_npmts_in_timebin<<<number_of_kernel_blocks,number_of_threads_per_block>>>(device_n_pmts_per_time_bin, device_max_number_of_pmts_in_time_bin, device_vertex_with_max_n_pmts);
getLastCudaError("candidates_kernel execution failed\n");
if( use_timing )
elapsed_candidates_kernel = stop_cuda_clock();
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- copy candidates from device to host \n");
checkCudaErrors(cudaMemcpy(host_max_number_of_pmts_in_time_bin,
device_max_number_of_pmts_in_time_bin,
n_time_bins*sizeof(unsigned int),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(host_vertex_with_max_n_pmts,
device_vertex_with_max_n_pmts,
n_time_bins*sizeof(unsigned int),
cudaMemcpyDeviceToHost));
if( use_timing )
elapsed_candidates_copy_host = stop_cuda_clock();
for(unsigned int time_bin = 0; time_bin<n_time_bins - 1; time_bin++){ // loop over time bins
// n_time_bins - 1 as we are checking the i and i+1 at the same time
if(host_max_number_of_pmts_in_time_bin[time_bin] > threshold_number_of_pmts) {
if( use_verbose )
printf(" time %f vertex (%f, %f, %f) npmts %d \n", (time_bin + 2)*time_step_size - time_offset, vertex_x[host_vertex_with_max_n_pmts[time_bin]], vertex_y[host_vertex_with_max_n_pmts[time_bin]], vertex_z[host_vertex_with_max_n_pmts[time_bin]], host_max_number_of_pmts_in_time_bin[time_bin]);
candidate_trigger_pair_vertex_time.push_back(std::make_pair(host_vertex_with_max_n_pmts[time_bin],time_bin+2));
candidate_trigger_npmts_in_time_bin.push_back(host_max_number_of_pmts_in_time_bin[time_bin]);
}
}
if( use_verbose )
// fix: std::vector::size() returns size_t, which %d cannot portably print
// (undefined behaviour on LP64); cast explicitly for the %d conversion
printf(" n candidates: %d \n", (int)candidate_trigger_pair_vertex_time.size());
///////////////////////
// coalesce triggers //
///////////////////////
if( use_timing )
start_cuda_clock();
coalesce_triggers();
if( use_timing )
elapsed_coalesce = stop_cuda_clock();
//////////////////////////////////
// separate triggers into gates //
//////////////////////////////////
if( use_timing )
start_cuda_clock();
separate_triggers_into_gates();
if( use_timing )
elapsed_gates = stop_cuda_clock();
// deallocate all memory
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- deallocate memory \n");
free_event_memories();
if( use_timing )
elapsed_free = stop_cuda_clock();
elapsed_total = stop_total_cuda_clock();
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- deallocate tofs memory \n");
free_global_memories();
if( use_timing )
elapsed_tofs_free = stop_cuda_clock();
// CUDA exit -- needed to flush the buffer which holds printf from each thread
if( use_timing )
start_cuda_clock();
if( use_verbose )
printf(" --- reset device \n");
// NOTE(review): the reset is commented out, so elapsed_reset below times an
// empty section
// cudaDeviceReset();
if( use_timing )
elapsed_reset = stop_cuda_clock();
if( use_timing ){
printf(" user parameters time : %f ms \n", elapsed_parameters);
printf(" read pmts execution time : %f ms \n", elapsed_pmts);
printf(" read detector execution time : %f ms \n", elapsed_detector);
printf(" make test vertices execution time : %f ms \n", elapsed_vertices);
printf(" setup threads execution time : %f ms \n", elapsed_threads);
printf(" setup threads candidates execution time : %f ms \n", elapsed_threads_candidates);
printf(" make table of tofs execution time : %f ms \n", elapsed_tof);
printf(" read input execution time : %f ms \n", elapsed_input);
printf(" allocate tofs memory on device execution time : %f ms \n", elapsed_memory_tofs_dev);
printf(" fill tofs memory on device execution time : %f ms \n", elapsed_tofs_copy_dev);
printf(" deallocate tofs memory execution time : %f ms \n", elapsed_tofs_free);
printf(" device reset execution time : %f ms \n", elapsed_reset);
printf(" allocate memory on device execution time : %f ms (%f) \n", elapsed_memory_dev, elapsed_memory_dev/elapsed_total);
printf(" fill memory on device execution time : %f ms (%f) \n", elapsed_copy_dev, elapsed_copy_dev/elapsed_total);
printf(" correct kernel execution time : %f ms (%f) \n", elapsed_kernel, elapsed_kernel/elapsed_total);
printf(" allocate candidates memory on device execution time : %f ms (%f) \n", elapsed_candidates_memory_dev, elapsed_candidates_memory_dev/elapsed_total);
printf(" copy candidates to host execution time : %f ms (%f) \n", elapsed_candidates_copy_host, elapsed_candidates_copy_host/elapsed_total);
printf(" candidates kernel execution time : %f ms (%f) \n", elapsed_candidates_kernel, elapsed_candidates_kernel/elapsed_total);
printf(" coalesce triggers execution time : %f ms (%f) \n", elapsed_coalesce, elapsed_coalesce/elapsed_total);
printf(" separate triggers into gates execution time : %f ms (%f) \n", elapsed_gates, elapsed_gates/elapsed_total);
printf(" deallocate memory execution time : %f ms (%f) \n", elapsed_free, elapsed_free/elapsed_total);
}
printf(" total execution time : %f ms \n", elapsed_total);
return 1;
}
//
// kernel routine
//
// __global__ identifier says it's a kernel function
// Histogram kernel: one thread per (test vertex, hit) pair.
// The thread reads the hit time, subtracts the precomputed time of flight
// from its test vertex to the hit's PMT, converts the corrected time into a
// time-bin index and atomically increments that vertex's bin in ct
// (layout: ct[vertex_index * constant_n_time_bins + bin]).
// Hit times, PMT ids and the time-of-flight table are read through the 1D
// textures tex_times, tex_ids and tex_times_of_flight.
__global__ void kernel_correct_times(unsigned int *ct){
// get unique id for each thread in each block
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
// printf(" tid %d \n", tid);
// tid runs from 0 to n_test_vertices * n_hits:
// vertex 0 vertex 1 ... vertex m
// (hit 0, ..., hit n; hit 0, ..., hit n; ...; hit 0, ..., hit n);
unsigned int vertex_index = (int)(tid/constant_n_hits);
unsigned int hit_index = tid % constant_n_hits;
// printf( " threadi %d blockdim %d blockid %d, tid %d, vertex_index %d, hit %d \n",
// threadIdx.x, blockDim.x, blockIdx.x, tid,
// vertex_index, hit_index);
// skip if thread is assigned to nonexistent vertex
// (the launch grid may overshoot n_test_vertices * n_hits)
if( vertex_index >= constant_n_test_vertices ) return;
// skip if thread is assigned to nonexistent hit
// NOTE(review): hit_index = tid % constant_n_hits is always < constant_n_hits
// when constant_n_hits > 0, so this guard appears to be dead code
if( hit_index >= constant_n_hits ) return;
// start of this vertex's row in the ct histogram
unsigned int vertex_block = constant_n_time_bins*vertex_index;
// start of this vertex's row in the times-of-flight table
unsigned int vertex_block2 = constant_n_PMTs*vertex_index;
// bin = floor((hit_time - tof(vertex, pmt) + time_offset) / time_step_size);
// the "- 1" suggests PMT ids are 1-based -- TODO confirm against input format
atomicAdd(&
ct[
vertex_block
+ int(floor(
(tex1Dfetch(tex_times,hit_index)
- tex1Dfetch(tex_times_of_flight,
vertex_block2
+ tex1Dfetch(tex_ids,hit_index) - 1
)
+ constant_time_offset)/constant_time_step_size
)
)
],1);
// printf( " hit %d (nh %d) id %d t %d; vertex %d (nv %d) tof %f %d \n", hit_index, constant_n_hits, ids[hit_index], t[hit_index], vertex_index, constant_n_test_vertices, tof, ct[time_index]);
return;
}
|
a41b77459e1d94466396e0374a18ef5852b0ec9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The file has been adapted from the two files:
// https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cu
// https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cuh
// Git commit hash: 295a615aacce7e54a37e7935274ba15e901c78e4
// We retain the following license from the original files:
// Copyright 2021, Jiaao He. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License").
#include "paddle/fluid/operators/number_count_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
#define CEIL(_x_, _y_) (((_x_)-1) / (_y_) + 1)
#define PERTHREAD_EXPERTS 256
#define WARP_SIZE 32
const int CUDA_NUM_THREADS = 512;
// Number of CUDA_NUM_THREADS-thread blocks required to cover N elements
// (ceiling division; yields 0 for N == 0).
static inline int GET_BLOCKS(const int N) {
  const int rounded_up = N + CUDA_NUM_THREADS - 1;
  return rounded_up / CUDA_NUM_THREADS;
}
// Fill data[0..length) with zeros; iteration is handled by the framework's
// CUDA_KERNEL_LOOP grid-stride macro.
template <typename T>
__global__ void initialize_zero_kernel(T* data, const int length) {
CUDA_KERNEL_LOOP(idx, length) { data[idx] = static_cast<T>(0); }
}
// Per-id occurrence counter.
// Each block owns the id range [blockIdx.x*PERTHREAD_EXPERTS,
// blockIdx.x*PERTHREAD_EXPERTS + PERTHREAD_EXPERTS), clamped to upper_range.
// Every thread scans a blockDim.x-strided slice of `numbers`, tallying into
// a private res_tmp array; the per-id tallies are then summed across each
// warp with shuffle reductions, and lane 0 of every warp adds its warp total
// into global number_count with an atomic add.
template <typename T>
__global__ void NumberCount(const T* numbers,
T* number_count,
int64_t batch_size,
int upper_range) {
// private per-thread counts for this block's id range
int res_tmp[PERTHREAD_EXPERTS] = {0};
int expert_min = blockIdx.x * PERTHREAD_EXPERTS;
int expert_max = expert_min + PERTHREAD_EXPERTS;
if (expert_max > upper_range) {
expert_max = upper_range;
}
for (int i = threadIdx.x; i < batch_size; i += blockDim.x) {
T idx = numbers[i];
// -1 marks an entry that must not be counted
if (idx == -1) {
continue;
}
// ignore ids outside this block's range
if (idx < expert_min || idx >= expert_max) {
continue;
}
res_tmp[idx - expert_min] += 1;
}
for (int i = expert_min; i < expert_max; ++i) {
int x = res_tmp[i - expert_min];
// shfl_down tree reduction: after the loop, lane 0 of each warp holds the
// warp's total for id i (mask -1u == all 32 lanes participate)
#pragma unroll
for (int j = 1; j < WARP_SIZE; j <<= 1) {
#ifdef __HIPCC__
x = x + __shfl_down(x, j);
#else
x = x + __shfl_down_sync(-1u, x, j);
#endif
}
if (threadIdx.x % WARP_SIZE == 0) {
phi::CudaAtomicAdd(number_count + i, x);
}
}
}
// GPU compute kernel for the number_count op.
// Produces output tensor "Out" of length upper_range with
// Out[i] = number of entries of input "numbers" equal to i
// (entries of -1 and ids outside [0, upper_range) are skipped by NumberCount).
template <typename T, typename DeviceContext>
class NumberCountOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto numbers = context.Input<phi::DenseTensor>("numbers");
auto upper_range = context.Attr<int>("upper_range");
auto number_count = context.Output<phi::DenseTensor>("Out");
int64_t batch_size = numbers->numel();
auto place = context.GetPlace();
const auto& dev_ctx = context.template device_context<phi::GPUContext>();
framework::DDim out_dims = phi::make_ddim({upper_range});
auto out_data = number_count->mutable_data<T>(out_dims, place);
const T* gate_data = numbers->data<T>();
// zero the counters before accumulating
hipLaunchKernelGGL(( initialize_zero_kernel<T>)
, dim3(GET_BLOCKS(upper_range)), dim3(CUDA_NUM_THREADS), 0, dev_ctx.stream(),
out_data, upper_range);
// one block per PERTHREAD_EXPERTS-wide slice of the id range
hipLaunchKernelGGL(( NumberCount<T>)
, dim3(CEIL(upper_range, PERTHREAD_EXPERTS)), dim3(256), 0, dev_ctx.stream(),
gate_data, out_data, batch_size, upper_range);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
PD_REGISTER_STRUCT_KERNEL(
number_count, GPU, ALL_LAYOUT, ops::NumberCountOpCUDAKernel, int64_t) {}
| a41b77459e1d94466396e0374a18ef5852b0ec9a.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The file has been adapted from the two files:
// https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cu
// https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cuh
// Git commit hash: 295a615aacce7e54a37e7935274ba15e901c78e4
// We retain the following license from the original files:
// Copyright 2021, Jiaao He. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License").
#include "paddle/fluid/operators/number_count_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
#define CEIL(_x_, _y_) (((_x_)-1) / (_y_) + 1)
#define PERTHREAD_EXPERTS 256
#define WARP_SIZE 32
// Threads per block used by the simple element-wise helper kernels below.
const int CUDA_NUM_THREADS = 512;
// Number of CUDA_NUM_THREADS-sized blocks needed to cover N elements
// (ceiling division; callers pass a non-negative element count).
static inline int GET_BLOCKS(const int N) {
  const int rounded_up = N + CUDA_NUM_THREADS - 1;
  return rounded_up / CUDA_NUM_THREADS;
}
// Writes T(0) to data[idx] for every idx generated by the framework's
// CUDA_KERNEL_LOOP macro over [0, length).  Used to clear the histogram
// before NumberCount accumulates into it.
template <typename T>
__global__ void initialize_zero_kernel(T* data, const int length) {
  CUDA_KERNEL_LOOP(idx, length) { data[idx] = static_cast<T>(0); }
}
// Histograms `numbers` (ids expected in [0, upper_range); -1 means "skip")
// into `number_count`.  Each block owns a contiguous slice of
// PERTHREAD_EXPERTS ids; every thread keeps private per-id counts over a
// blockDim-strided scan of the batch, then counts are warp-reduced and
// published with one atomic add per warp per id.
template <typename T>
__global__ void NumberCount(const T* numbers,
                            T* number_count,
                            int64_t batch_size,
                            int upper_range) {
  // Thread-private partial counts for this block's id slice.
  // NOTE(review): 256 ints per thread lands in local memory — presumably a
  // deliberate trade-off; confirm before changing PERTHREAD_EXPERTS.
  int res_tmp[PERTHREAD_EXPERTS] = {0};
  int expert_min = blockIdx.x * PERTHREAD_EXPERTS;
  int expert_max = expert_min + PERTHREAD_EXPERTS;
  if (expert_max > upper_range) {
    // Last block may own a partial slice.
    expert_max = upper_range;
  }
  for (int i = threadIdx.x; i < batch_size; i += blockDim.x) {
    T idx = numbers[i];
    if (idx == -1) {
      // -1 marks an entry that must not be counted.
      continue;
    }
    if (idx < expert_min || idx >= expert_max) {
      continue;  // id belongs to another block's slice
    }
    res_tmp[idx - expert_min] += 1;
  }
  // Reduce per-thread counts: butterfly shuffle leaves the warp's sum in
  // lane 0, which publishes it with a single atomic.  Loop bounds are
  // uniform across the block, so every lane participates in the shuffles.
  for (int i = expert_min; i < expert_max; ++i) {
    int x = res_tmp[i - expert_min];
#pragma unroll
    for (int j = 1; j < WARP_SIZE; j <<= 1) {
#ifdef __HIPCC__
      x = x + __shfl_down(x, j);
#else
      // -1u == full 32-lane participation mask.
      x = x + __shfl_down_sync(-1u, x, j);
#endif
    }
    if (threadIdx.x % WARP_SIZE == 0) {
      phi::CudaAtomicAdd(number_count + i, x);
    }
  }
}
// Op kernel: counts how many elements of input tensor "numbers" equal each
// value in [0, upper_range) and writes the histogram to output "Out"
// (shape {upper_range}).  All work is enqueued on the context's stream.
template <typename T, typename DeviceContext>
class NumberCountOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto numbers = context.Input<phi::DenseTensor>("numbers");
    auto upper_range = context.Attr<int>("upper_range");
    auto number_count = context.Output<phi::DenseTensor>("Out");
    int64_t batch_size = numbers->numel();
    auto place = context.GetPlace();
    const auto& dev_ctx = context.template device_context<phi::GPUContext>();
    framework::DDim out_dims = phi::make_ddim({upper_range});
    auto out_data = number_count->mutable_data<T>(out_dims, place);
    const T* gate_data = numbers->data<T>();
    // Zero the histogram first; NumberCount only adds into it.
    initialize_zero_kernel<T>
        <<<GET_BLOCKS(upper_range), CUDA_NUM_THREADS, 0, dev_ctx.stream()>>>(
            out_data, upper_range);
    // One block per PERTHREAD_EXPERTS-wide slice of the id range.
    NumberCount<T>
        <<<CEIL(upper_range, PERTHREAD_EXPERTS), 256, 0, dev_ctx.stream()>>>(
            gate_data, out_data, batch_size, upper_range);
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
PD_REGISTER_STRUCT_KERNEL(
number_count, GPU, ALL_LAYOUT, ops::NumberCountOpCUDAKernel, int64_t) {}
|
eb38d374fcace59dc72384140ca499146fde934a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
/*! \brief prediction parameters */
// Runtime-configurable knobs for the GPU predictor, parsed by dmlc.
struct GPUPredictionParam : public dmlc::Parameter<GPUPredictionParam> {
  int gpu_id;   // ordinal of the first device used for prediction
  int n_gpus;   // number of devices to spread prediction over (-1 = all)
  bool silent;  // suppress informational output
  // declare parameters
  DMLC_DECLARE_PARAMETER(GPUPredictionParam) {
    DMLC_DECLARE_FIELD(gpu_id).set_lower_bound(0).set_default(0).describe(
        "Device ordinal for GPU prediction.");
    DMLC_DECLARE_FIELD(n_gpus).set_lower_bound(-1).set_default(1).describe(
        "Number of devices to use for prediction.");
    DMLC_DECLARE_FIELD(silent).set_default(false).describe(
        "Do not print information during trainig.");
  }
};
DMLC_REGISTER_PARAMETER(GPUPredictionParam);
// Adds `amount` to every element of [begin_itr, end_itr) in place, on the
// device, via thrust::transform with a capturing device lambda.
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
  thrust::transform(begin_itr, end_itr, begin_itr,
                    [=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
  XGBOOST_DEVICE DevicePredictionNode()
      : fidx(-1), left_child_idx(-1), right_child_idx(-1) {}

  // A node stores either a leaf weight or a split threshold, never both.
  union NodeValue {
    float leaf_weight;
    float fvalue;
  };

  // Split feature index; the top bit doubles as the "missing goes left"
  // flag (see the constructor and MissingLeft()).
  int fidx;
  int left_child_idx;
  int right_child_idx;
  NodeValue val;

  // Packs a host-side RegTree node into this 16-byte device layout.
  DevicePredictionNode(const RegTree::Node& n) {  // NOLINT
    static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
    this->left_child_idx = n.LeftChild();
    this->right_child_idx = n.RightChild();
    this->fidx = n.SplitIndex();
    if (n.DefaultLeft()) {
      // Stash the default-direction flag in the sign bit of fidx.
      fidx |= (1U << 31);
    }
    if (n.IsLeaf()) {
      this->val.leaf_weight = n.LeafValue();
    } else {
      this->val.fvalue = n.SplitCond();
    }
  }

  XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
  // Feature index with the default-direction flag masked off.
  XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
  XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
  // Child to follow when the feature value is missing (NaN).
  XGBOOST_DEVICE int MissingIdx() const {
    if (MissingLeft()) {
      return this->left_child_idx;
    } else {
      return this->right_child_idx;
    }
  }
  XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
  XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
/**
 * \brief Per-thread feature-value accessor used by the prediction kernel.
 *
 * When use_shared is true the constructor stages each thread's row into a
 * dense blockDim.x * num_features tile in shared memory (absent features
 * filled with NaN); otherwise GetFvalue() binary-searches the row's CSR
 * entries directly in global memory.
 *
 * The constructor contains __syncthreads() barriers on the use_shared path,
 * so every thread of the block must construct it — including threads whose
 * global index is >= num_rows.
 */
struct ElementLoader {
  bool use_shared;
  common::Span<const size_t> d_row_ptr;
  common::Span<const Entry> d_data;
  int num_features;
  float* smem;         // shared-memory tile; only meaningful if use_shared
  size_t entry_start;  // offset of this shard's first Entry within d_data
  __device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
                           common::Span<const Entry> entry, int num_features,
                           float* smem, int num_rows, size_t entry_start)
      : use_shared(use_shared),
        d_row_ptr(row_ptr),
        d_data(entry),
        num_features(num_features),
        smem(smem),
        entry_start(entry_start) {
    // Copy instances
    if (use_shared) {
      bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
      int shared_elements = blockDim.x * num_features;
      dh::BlockFill(smem, shared_elements, nanf(""));
      __syncthreads();
      if (global_idx < num_rows) {
        bst_uint elem_begin = d_row_ptr[global_idx];
        bst_uint elem_end = d_row_ptr[global_idx + 1];
        for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
          Entry elem = d_data[elem_idx - entry_start];
          smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
        }
      }
      __syncthreads();
    }
  }
  // Returns the value of feature `fidx` for row `ridx`, or NaN when the
  // feature is absent from the row.
  __device__ float GetFvalue(int ridx, int fidx) {
    if (use_shared) {
      return smem[threadIdx.x * num_features + fidx];
    } else {
      // Binary search over the row's sparse entries.  The probe never
      // excludes `middle` itself, so termination relies on detecting a
      // stalled probe position.
      auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
      auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
      // Fix: previous_middle was declared without an initializer and then
      // compared against on the first iteration — a read of an indeterminate
      // value (undefined behavior).  end_ptr is a safe "no probe yet"
      // sentinel: whenever the loop runs, middle is strictly below end_ptr.
      common::Span<const Entry>::iterator previous_middle = end_ptr;
      while (end_ptr != begin_ptr) {
        auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
        if (middle == previous_middle) {
          break;
        } else {
          previous_middle = middle;
        }
        if (middle->index == fidx) {
          return middle->fvalue;
        } else if (middle->index < fidx) {
          begin_ptr = middle;
        } else {
          end_ptr = middle;
        }
      }
      // Value is missing
      return nanf("");
    }
  }
};
// Walks `tree` from the root for row `ridx` and returns the weight of the
// leaf the row lands in.  Missing (NaN) feature values follow the node's
// default direction.
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
                               ElementLoader* loader) {
  DevicePredictionNode node = tree[0];
  while (!node.IsLeaf()) {
    const float fvalue = loader->GetFvalue(ridx, node.GetFidx());
    int next;
    if (isnan(fvalue)) {
      // Missing value: take the default branch recorded for this node.
      next = node.MissingIdx();
    } else {
      next = fvalue < node.GetFvalue() ? node.left_child_idx
                                       : node.right_child_idx;
    }
    node = tree[next];
  }
  return node.GetWeight();
}
// One thread per row: accumulates the margin contributions of trees
// [tree_begin, tree_end) into d_out_predictions.  With a single output
// group the per-row sum is kept in a register and written once; with
// multiple groups each tree adds directly into its group's slot.
// Requires blockDim.x * num_features floats of dynamic shared memory when
// use_shared is true.
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
                              common::Span<float> d_out_predictions,
                              common::Span<size_t> d_tree_segments,
                              common::Span<int> d_tree_group,
                              common::Span<const size_t> d_row_ptr,
                              common::Span<const Entry> d_data, size_t tree_begin,
                              size_t tree_end, size_t num_features,
                              size_t num_rows, size_t entry_start,
                              bool use_shared, int num_group) {
  extern __shared__ float smem[];
  bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
  // The loader's constructor contains block-wide barriers, so it must run
  // in ALL threads — the out-of-range check deliberately comes after it.
  ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
                       num_rows, entry_start);
  if (global_idx >= num_rows) return;
  if (num_group == 1) {
    float sum = 0;
    for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      // d_tree_segments holds the node offset of each tree in d_nodes.
      const DevicePredictionNode* d_tree =
          &d_nodes[d_tree_segments[tree_idx - tree_begin]];
      sum += GetLeafWeight(global_idx, d_tree, &loader);
    }
    d_out_predictions[global_idx] += sum;
  } else {
    for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      int tree_group = d_tree_group[tree_idx];
      const DevicePredictionNode* d_tree =
          &d_nodes[d_tree_segments[tree_idx - tree_begin]];
      // Predictions are laid out row-major: row * num_group + group.
      bst_uint out_prediction_idx = global_idx * num_group + tree_group;
      d_out_predictions[out_prediction_idx] +=
          GetLeafWeight(global_idx, d_tree, &loader);
    }
  }
}
// Multi-GPU predictor: shards rows across devices, copies the model's trees
// to each device, and launches PredictKernel per shard.  Methods that have
// no GPU implementation delegate to a wrapped CPU predictor.
class GPUPredictor : public xgboost::Predictor {
 protected:
  struct DevicePredictionCacheEntry {
    std::shared_ptr<DMatrix> data;
    HostDeviceVector<bst_float> predictions;
  };

 private:
  // Builds per-device element offsets by reading the last row_ptr entry of
  // every device shard (one tiny D2H copy per device, in parallel).
  void DeviceOffsets(const HostDeviceVector<size_t>& data, std::vector<size_t>* out_offsets) {
    auto& offsets = *out_offsets;
    offsets.resize(devices_.Size() + 1);
    offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
    for (int shard = 0; shard < devices_.Size(); ++shard) {
      int device = devices_.DeviceId(shard);
      auto data_span = data.DeviceSpan(device);
      dh::safe_cuda(hipSetDevice(device));
      // copy the last element from every shard
      dh::safe_cuda(hipMemcpy(&offsets.at(shard + 1),
                              &data_span[data_span.size()-1],
                              sizeof(size_t), hipMemcpyDeviceToHost));
    }
  }

  // Per-device state: the model copied to that device plus launch limits.
  struct DeviceShard {
    DeviceShard() : device_(-1) {}
    void Init(int device) {
      this->device_ = device;
      max_shared_memory_bytes = dh::MaxSharedMemory(this->device_);
    }
    // Copies the flattened trees to this device and runs PredictKernel over
    // this shard's rows.  Synchronous: ends with a device synchronize.
    void PredictInternal
    (const SparsePage& batch, const MetaInfo& info,
     HostDeviceVector<bst_float>* predictions,
     const gbm::GBTreeModel& model,
     const thrust::host_vector<size_t>& h_tree_segments,
     const thrust::host_vector<DevicePredictionNode>& h_nodes,
     size_t tree_begin, size_t tree_end) {
      dh::safe_cuda(hipSetDevice(device_));
      nodes.resize(h_nodes.size());
      dh::safe_cuda(hipMemcpy(dh::Raw(nodes), h_nodes.data(),
                              sizeof(DevicePredictionNode) * h_nodes.size(),
                              hipMemcpyHostToDevice));
      tree_segments.resize(h_tree_segments.size());
      dh::safe_cuda(hipMemcpy(dh::Raw(tree_segments), h_tree_segments.data(),
                              sizeof(size_t) * h_tree_segments.size(),
                              hipMemcpyHostToDevice));
      tree_group.resize(model.tree_info.size());
      dh::safe_cuda(hipMemcpy(dh::Raw(tree_group), model.tree_info.data(),
                              sizeof(int) * model.tree_info.size(),
                              hipMemcpyHostToDevice));
      const int BLOCK_THREADS = 128;
      // NOTE(review): if DeviceSize() can return 0 this size_t subtraction
      // wraps around before the < 1 check — confirm shards are never empty.
      size_t num_rows = batch.offset.DeviceSize(device_) - 1;
      if (num_rows < 1) { return; }
      const int GRID_SIZE = static_cast<int>(dh::DivRoundUp(num_rows, BLOCK_THREADS));
      int shared_memory_bytes = static_cast<int>
        (sizeof(float) * info.num_col_ * BLOCK_THREADS);
      bool use_shared = true;
      if (shared_memory_bytes > max_shared_memory_bytes) {
        // Row tile does not fit in shared memory: fall back to global
        // memory binary search inside ElementLoader.
        shared_memory_bytes = 0;
        use_shared = false;
      }
      const auto& data_distr = batch.data.Distribution();
      size_t entry_start = data_distr.ShardStart(batch.data.Size(),
                                                 data_distr.Devices().Index(device_));
      hipLaunchKernelGGL(( PredictKernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), shared_memory_bytes, 0,
          dh::ToSpan(nodes), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments),
          dh::ToSpan(tree_group), batch.offset.DeviceSpan(device_),
          batch.data.DeviceSpan(device_), tree_begin, tree_end, info.num_col_,
          num_rows, entry_start, use_shared, model.param.num_output_group);
      dh::safe_cuda(hipGetLastError());
      dh::safe_cuda(hipDeviceSynchronize());
    }
    int device_;
    thrust::device_vector<DevicePredictionNode> nodes;
    thrust::device_vector<size_t> tree_segments;
    thrust::device_vector<int> tree_group;
    size_t max_shared_memory_bytes;
  };

  // Flattens trees [tree_begin, tree_end) into one node array plus segment
  // offsets, then dispatches every row batch to all shards.
  void DevicePredictInternal(DMatrix* dmat,
                             HostDeviceVector<bst_float>* out_preds,
                             const gbm::GBTreeModel& model, size_t tree_begin,
                             size_t tree_end) {
    if (tree_end - tree_begin == 0) { return; }
    CHECK_EQ(model.param.size_leaf_vector, 0);
    // Copy decision trees to device
    thrust::host_vector<size_t> h_tree_segments;
    h_tree_segments.reserve((tree_end - tree_begin) + 1);
    size_t sum = 0;
    h_tree_segments.push_back(sum);
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      sum += model.trees.at(tree_idx)->GetNodes().size();
      h_tree_segments.push_back(sum);
    }
    thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
      // Implicit RegTree::Node -> DevicePredictionNode conversion here.
      std::copy(src_nodes.begin(), src_nodes.end(),
                h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
    }
    size_t i_batch = 0;
    for (const auto &batch : dmat->GetRowBatches()) {
      CHECK_EQ(i_batch, 0) << "External memory not supported";
      size_t n_rows = batch.offset.Size() - 1;
      // out_preds have been resharded and resized in InitOutPredictions()
      batch.offset.Reshard(GPUDistribution::Overlap(devices_, 1));
      std::vector<size_t> device_offsets;
      DeviceOffsets(batch.offset, &device_offsets);
      batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
      dh::ExecuteIndexShards(&shards, [&](int idx, DeviceShard& shard) {
        shard.PredictInternal(batch, dmat->Info(), out_preds, model,
                              h_tree_segments, h_nodes, tree_begin, tree_end);
      });
      i_batch++;
    }
  }

 public:
  GPUPredictor() : cpu_predictor(Predictor::Create("cpu_predictor")) {}

  // Predicts margins for all rows of dmat, serving from the prediction
  // cache when the requested tree range matches a cached entry.
  void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
                    const gbm::GBTreeModel& model, int tree_begin,
                    unsigned ntree_limit = 0) override {
    GPUSet devices = GPUSet::All(
        param.gpu_id, param.n_gpus, dmat->Info().num_row_);
    ConfigureShards(devices);
    if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
      return;
    }
    this->InitOutPredictions(dmat->Info(), out_preds, model);
    int tree_end = ntree_limit * model.param.num_output_group;
    if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
      // 0 (or an over-large limit) means "use every tree".
      tree_end = static_cast<unsigned>(model.trees.size());
    }
    DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
  }

 protected:
  // Sizes and reshards out_preds, seeding it with the base margin (per-row
  // values if provided, otherwise the model's scalar base margin).
  void InitOutPredictions(const MetaInfo& info,
                          HostDeviceVector<bst_float>* out_preds,
                          const gbm::GBTreeModel& model) const {
    size_t n_classes = model.param.num_output_group;
    size_t n = n_classes * info.num_row_;
    const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
    out_preds->Reshard(GPUDistribution::Granular(devices_, n_classes));
    out_preds->Resize(n);
    if (base_margin.Size() != 0) {
      CHECK_EQ(out_preds->Size(), n);
      out_preds->Copy(base_margin);
    } else {
      out_preds->Fill(model.base_margin);
    }
  }

  // Copies cached predictions into out_preds when the cached entry covers
  // the requested tree range; returns true on a cache hit.
  bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
                        const gbm::GBTreeModel& model, unsigned ntree_limit) {
    if (ntree_limit == 0 ||
        ntree_limit * model.param.num_output_group >= model.trees.size()) {
      auto it = cache_.find(dmat);
      if (it != cache_.end()) {
        const HostDeviceVector<bst_float>& y = it->second.predictions;
        if (y.Size() != 0) {
          out_preds->Reshard(y.Distribution());
          out_preds->Resize(y.Size());
          out_preds->Copy(y);
          return true;
        }
      }
    }
    return false;
  }

  // Refreshes every cached prediction after num_new_trees were added,
  // preferring the updater's fast cache path when available.
  void UpdatePredictionCache(
      const gbm::GBTreeModel& model,
      std::vector<std::unique_ptr<TreeUpdater>>* updaters,
      int num_new_trees) override {
    auto old_ntree = model.trees.size() - num_new_trees;
    // update cache entry
    for (auto& kv : cache_) {
      PredictionCacheEntry& e = kv.second;
      DMatrix* dmat = kv.first;
      HostDeviceVector<bst_float>& predictions = e.predictions;
      if (predictions.Size() == 0) {
        this->InitOutPredictions(dmat->Info(), &predictions, model);
      }
      if (model.param.num_output_group == 1 && updaters->size() > 0 &&
          num_new_trees == 1 &&
          updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
        // do nothing
      } else {
        // Fall back: predict only the newly added trees on top of the cache.
        DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
      }
    }
  }

  // NOTE(review): ntree_limit is accepted but not forwarded to the CPU
  // predictor here — confirm this is intentional.
  void PredictInstance(const SparsePage::Inst& inst,
                       std::vector<bst_float>* out_preds,
                       const gbm::GBTreeModel& model, unsigned ntree_limit,
                       unsigned root_index) override {
    cpu_predictor->PredictInstance(inst, out_preds, model, root_index);
  }
  // Leaf-index prediction is delegated to the CPU implementation.
  void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
                   const gbm::GBTreeModel& model,
                   unsigned ntree_limit) override {
    cpu_predictor->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
  }
  // Feature-contribution (SHAP) prediction is delegated to the CPU
  // implementation.
  void PredictContribution(DMatrix* p_fmat,
                           std::vector<bst_float>* out_contribs,
                           const gbm::GBTreeModel& model, unsigned ntree_limit,
                           bool approximate, int condition,
                           unsigned condition_feature) override {
    cpu_predictor->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
                                       approximate, condition,
                                       condition_feature);
  }
  void PredictInteractionContributions(DMatrix* p_fmat,
                                       std::vector<bst_float>* out_contribs,
                                       const gbm::GBTreeModel& model,
                                       unsigned ntree_limit,
                                       bool approximate) override {
    cpu_predictor->PredictInteractionContributions(p_fmat, out_contribs, model,
                                                   ntree_limit, approximate);
  }

  // Parses configuration and (re)creates one shard per selected device.
  void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
            const std::vector<std::shared_ptr<DMatrix>>& cache) override {
    Predictor::Init(cfg, cache);
    cpu_predictor->Init(cfg, cache);
    param.InitAllowUnknown(cfg);
    GPUSet devices = GPUSet::All(param.gpu_id, param.n_gpus);
    ConfigureShards(devices);
  }

 private:
  /*! \brief Re configure shards when GPUSet is changed. */
  void ConfigureShards(GPUSet devices) {
    if (devices_ == devices) return;
    devices_ = devices;
    shards.clear();
    shards.resize(devices_.Size());
    dh::ExecuteIndexShards(&shards, [=](size_t i, DeviceShard& shard){
      shard.Init(devices_.DeviceId(i));
    });
  }

  GPUPredictionParam param;
  std::unique_ptr<Predictor> cpu_predictor;  // fallback for non-GPU paths
  std::vector<DeviceShard> shards;           // one per device in devices_
  GPUSet devices_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
| eb38d374fcace59dc72384140ca499146fde934a.cu | /*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
/*! \brief prediction parameters */
// Runtime-configurable knobs for the GPU predictor, parsed by dmlc.
struct GPUPredictionParam : public dmlc::Parameter<GPUPredictionParam> {
  int gpu_id;   // ordinal of the first device used for prediction
  int n_gpus;   // number of devices to spread prediction over (-1 = all)
  bool silent;  // suppress informational output
  // declare parameters
  DMLC_DECLARE_PARAMETER(GPUPredictionParam) {
    DMLC_DECLARE_FIELD(gpu_id).set_lower_bound(0).set_default(0).describe(
        "Device ordinal for GPU prediction.");
    DMLC_DECLARE_FIELD(n_gpus).set_lower_bound(-1).set_default(1).describe(
        "Number of devices to use for prediction.");
    DMLC_DECLARE_FIELD(silent).set_default(false).describe(
        "Do not print information during trainig.");
  }
};
DMLC_REGISTER_PARAMETER(GPUPredictionParam);
// Adds `amount` to every element of [begin_itr, end_itr) in place, on the
// device, via thrust::transform with a capturing device lambda.
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
  thrust::transform(begin_itr, end_itr, begin_itr,
                    [=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
  XGBOOST_DEVICE DevicePredictionNode()
      : fidx(-1), left_child_idx(-1), right_child_idx(-1) {}

  // A node stores either a leaf weight or a split threshold, never both.
  union NodeValue {
    float leaf_weight;
    float fvalue;
  };

  // Split feature index; the top bit doubles as the "missing goes left"
  // flag (see the constructor and MissingLeft()).
  int fidx;
  int left_child_idx;
  int right_child_idx;
  NodeValue val;

  // Packs a host-side RegTree node into this 16-byte device layout.
  DevicePredictionNode(const RegTree::Node& n) {  // NOLINT
    static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
    this->left_child_idx = n.LeftChild();
    this->right_child_idx = n.RightChild();
    this->fidx = n.SplitIndex();
    if (n.DefaultLeft()) {
      // Stash the default-direction flag in the sign bit of fidx.
      fidx |= (1U << 31);
    }
    if (n.IsLeaf()) {
      this->val.leaf_weight = n.LeafValue();
    } else {
      this->val.fvalue = n.SplitCond();
    }
  }

  XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
  // Feature index with the default-direction flag masked off.
  XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
  XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
  // Child to follow when the feature value is missing (NaN).
  XGBOOST_DEVICE int MissingIdx() const {
    if (MissingLeft()) {
      return this->left_child_idx;
    } else {
      return this->right_child_idx;
    }
  }
  XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
  XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
/**
 * \brief Per-thread feature-value accessor used by the prediction kernel.
 *
 * When use_shared is true the constructor stages each thread's row into a
 * dense blockDim.x * num_features tile in shared memory (absent features
 * filled with NaN); otherwise GetFvalue() binary-searches the row's CSR
 * entries directly in global memory.
 *
 * The constructor contains __syncthreads() barriers on the use_shared path,
 * so every thread of the block must construct it — including threads whose
 * global index is >= num_rows.
 */
struct ElementLoader {
  bool use_shared;
  common::Span<const size_t> d_row_ptr;
  common::Span<const Entry> d_data;
  int num_features;
  float* smem;         // shared-memory tile; only meaningful if use_shared
  size_t entry_start;  // offset of this shard's first Entry within d_data
  __device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
                           common::Span<const Entry> entry, int num_features,
                           float* smem, int num_rows, size_t entry_start)
      : use_shared(use_shared),
        d_row_ptr(row_ptr),
        d_data(entry),
        num_features(num_features),
        smem(smem),
        entry_start(entry_start) {
    // Copy instances
    if (use_shared) {
      bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
      int shared_elements = blockDim.x * num_features;
      dh::BlockFill(smem, shared_elements, nanf(""));
      __syncthreads();
      if (global_idx < num_rows) {
        bst_uint elem_begin = d_row_ptr[global_idx];
        bst_uint elem_end = d_row_ptr[global_idx + 1];
        for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
          Entry elem = d_data[elem_idx - entry_start];
          smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
        }
      }
      __syncthreads();
    }
  }
  // Returns the value of feature `fidx` for row `ridx`, or NaN when the
  // feature is absent from the row.
  __device__ float GetFvalue(int ridx, int fidx) {
    if (use_shared) {
      return smem[threadIdx.x * num_features + fidx];
    } else {
      // Binary search over the row's sparse entries.  The probe never
      // excludes `middle` itself, so termination relies on detecting a
      // stalled probe position.
      auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
      auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
      // Fix: previous_middle was declared without an initializer and then
      // compared against on the first iteration — a read of an indeterminate
      // value (undefined behavior).  end_ptr is a safe "no probe yet"
      // sentinel: whenever the loop runs, middle is strictly below end_ptr.
      common::Span<const Entry>::iterator previous_middle = end_ptr;
      while (end_ptr != begin_ptr) {
        auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
        if (middle == previous_middle) {
          break;
        } else {
          previous_middle = middle;
        }
        if (middle->index == fidx) {
          return middle->fvalue;
        } else if (middle->index < fidx) {
          begin_ptr = middle;
        } else {
          end_ptr = middle;
        }
      }
      // Value is missing
      return nanf("");
    }
  }
};
// Walks `tree` from the root for row `ridx` and returns the weight of the
// leaf the row lands in.  Missing (NaN) feature values follow the node's
// default direction.
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
                               ElementLoader* loader) {
  DevicePredictionNode node = tree[0];
  while (!node.IsLeaf()) {
    const float fvalue = loader->GetFvalue(ridx, node.GetFidx());
    int next;
    if (isnan(fvalue)) {
      // Missing value: take the default branch recorded for this node.
      next = node.MissingIdx();
    } else {
      next = fvalue < node.GetFvalue() ? node.left_child_idx
                                       : node.right_child_idx;
    }
    node = tree[next];
  }
  return node.GetWeight();
}
// One thread per row: accumulates the margin contributions of trees
// [tree_begin, tree_end) into d_out_predictions.  With a single output
// group the per-row sum is kept in a register and written once; with
// multiple groups each tree adds directly into its group's slot.
// Requires blockDim.x * num_features floats of dynamic shared memory when
// use_shared is true.
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
                              common::Span<float> d_out_predictions,
                              common::Span<size_t> d_tree_segments,
                              common::Span<int> d_tree_group,
                              common::Span<const size_t> d_row_ptr,
                              common::Span<const Entry> d_data, size_t tree_begin,
                              size_t tree_end, size_t num_features,
                              size_t num_rows, size_t entry_start,
                              bool use_shared, int num_group) {
  extern __shared__ float smem[];
  bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
  // The loader's constructor contains block-wide barriers, so it must run
  // in ALL threads — the out-of-range check deliberately comes after it.
  ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
                       num_rows, entry_start);
  if (global_idx >= num_rows) return;
  if (num_group == 1) {
    float sum = 0;
    for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      // d_tree_segments holds the node offset of each tree in d_nodes.
      const DevicePredictionNode* d_tree =
          &d_nodes[d_tree_segments[tree_idx - tree_begin]];
      sum += GetLeafWeight(global_idx, d_tree, &loader);
    }
    d_out_predictions[global_idx] += sum;
  } else {
    for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      int tree_group = d_tree_group[tree_idx];
      const DevicePredictionNode* d_tree =
          &d_nodes[d_tree_segments[tree_idx - tree_begin]];
      // Predictions are laid out row-major: row * num_group + group.
      bst_uint out_prediction_idx = global_idx * num_group + tree_group;
      d_out_predictions[out_prediction_idx] +=
          GetLeafWeight(global_idx, d_tree, &loader);
    }
  }
}
// Multi-GPU predictor: shards rows across devices, copies the model's trees
// to each device, and launches PredictKernel per shard.  Methods that have
// no GPU implementation delegate to a wrapped CPU predictor.
class GPUPredictor : public xgboost::Predictor {
 protected:
  struct DevicePredictionCacheEntry {
    std::shared_ptr<DMatrix> data;
    HostDeviceVector<bst_float> predictions;
  };

 private:
  // Builds per-device element offsets by reading the last row_ptr entry of
  // every device shard (one tiny D2H copy per device, in parallel).
  void DeviceOffsets(const HostDeviceVector<size_t>& data, std::vector<size_t>* out_offsets) {
    auto& offsets = *out_offsets;
    offsets.resize(devices_.Size() + 1);
    offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
    for (int shard = 0; shard < devices_.Size(); ++shard) {
      int device = devices_.DeviceId(shard);
      auto data_span = data.DeviceSpan(device);
      dh::safe_cuda(cudaSetDevice(device));
      // copy the last element from every shard
      dh::safe_cuda(cudaMemcpy(&offsets.at(shard + 1),
                               &data_span[data_span.size()-1],
                               sizeof(size_t), cudaMemcpyDeviceToHost));
    }
  }

  // Per-device state: the model copied to that device plus launch limits.
  struct DeviceShard {
    DeviceShard() : device_(-1) {}
    void Init(int device) {
      this->device_ = device;
      max_shared_memory_bytes = dh::MaxSharedMemory(this->device_);
    }
    // Copies the flattened trees to this device and runs PredictKernel over
    // this shard's rows.  Synchronous: ends with a device synchronize.
    void PredictInternal
    (const SparsePage& batch, const MetaInfo& info,
     HostDeviceVector<bst_float>* predictions,
     const gbm::GBTreeModel& model,
     const thrust::host_vector<size_t>& h_tree_segments,
     const thrust::host_vector<DevicePredictionNode>& h_nodes,
     size_t tree_begin, size_t tree_end) {
      dh::safe_cuda(cudaSetDevice(device_));
      nodes.resize(h_nodes.size());
      dh::safe_cuda(cudaMemcpy(dh::Raw(nodes), h_nodes.data(),
                               sizeof(DevicePredictionNode) * h_nodes.size(),
                               cudaMemcpyHostToDevice));
      tree_segments.resize(h_tree_segments.size());
      dh::safe_cuda(cudaMemcpy(dh::Raw(tree_segments), h_tree_segments.data(),
                               sizeof(size_t) * h_tree_segments.size(),
                               cudaMemcpyHostToDevice));
      tree_group.resize(model.tree_info.size());
      dh::safe_cuda(cudaMemcpy(dh::Raw(tree_group), model.tree_info.data(),
                               sizeof(int) * model.tree_info.size(),
                               cudaMemcpyHostToDevice));
      const int BLOCK_THREADS = 128;
      // NOTE(review): if DeviceSize() can return 0 this size_t subtraction
      // wraps around before the < 1 check — confirm shards are never empty.
      size_t num_rows = batch.offset.DeviceSize(device_) - 1;
      if (num_rows < 1) { return; }
      const int GRID_SIZE = static_cast<int>(dh::DivRoundUp(num_rows, BLOCK_THREADS));
      int shared_memory_bytes = static_cast<int>
        (sizeof(float) * info.num_col_ * BLOCK_THREADS);
      bool use_shared = true;
      if (shared_memory_bytes > max_shared_memory_bytes) {
        // Row tile does not fit in shared memory: fall back to global
        // memory binary search inside ElementLoader.
        shared_memory_bytes = 0;
        use_shared = false;
      }
      const auto& data_distr = batch.data.Distribution();
      size_t entry_start = data_distr.ShardStart(batch.data.Size(),
                                                 data_distr.Devices().Index(device_));
      PredictKernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>
          (dh::ToSpan(nodes), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments),
           dh::ToSpan(tree_group), batch.offset.DeviceSpan(device_),
           batch.data.DeviceSpan(device_), tree_begin, tree_end, info.num_col_,
           num_rows, entry_start, use_shared, model.param.num_output_group);
      dh::safe_cuda(cudaGetLastError());
      dh::safe_cuda(cudaDeviceSynchronize());
    }
    int device_;
    thrust::device_vector<DevicePredictionNode> nodes;
    thrust::device_vector<size_t> tree_segments;
    thrust::device_vector<int> tree_group;
    size_t max_shared_memory_bytes;
  };

  // Flattens trees [tree_begin, tree_end) into one node array plus segment
  // offsets, then dispatches every row batch to all shards.
  void DevicePredictInternal(DMatrix* dmat,
                             HostDeviceVector<bst_float>* out_preds,
                             const gbm::GBTreeModel& model, size_t tree_begin,
                             size_t tree_end) {
    if (tree_end - tree_begin == 0) { return; }
    CHECK_EQ(model.param.size_leaf_vector, 0);
    // Copy decision trees to device
    thrust::host_vector<size_t> h_tree_segments;
    h_tree_segments.reserve((tree_end - tree_begin) + 1);
    size_t sum = 0;
    h_tree_segments.push_back(sum);
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      sum += model.trees.at(tree_idx)->GetNodes().size();
      h_tree_segments.push_back(sum);
    }
    thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
      // Implicit RegTree::Node -> DevicePredictionNode conversion here.
      std::copy(src_nodes.begin(), src_nodes.end(),
                h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
    }
    size_t i_batch = 0;
    for (const auto &batch : dmat->GetRowBatches()) {
      CHECK_EQ(i_batch, 0) << "External memory not supported";
      size_t n_rows = batch.offset.Size() - 1;
      // out_preds have been resharded and resized in InitOutPredictions()
      batch.offset.Reshard(GPUDistribution::Overlap(devices_, 1));
      std::vector<size_t> device_offsets;
      DeviceOffsets(batch.offset, &device_offsets);
      batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
      dh::ExecuteIndexShards(&shards, [&](int idx, DeviceShard& shard) {
        shard.PredictInternal(batch, dmat->Info(), out_preds, model,
                              h_tree_segments, h_nodes, tree_begin, tree_end);
      });
      i_batch++;
    }
  }

 public:
  GPUPredictor() : cpu_predictor(Predictor::Create("cpu_predictor")) {}

  // Predicts margins for all rows of dmat, serving from the prediction
  // cache when the requested tree range matches a cached entry.
  void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
                    const gbm::GBTreeModel& model, int tree_begin,
                    unsigned ntree_limit = 0) override {
    GPUSet devices = GPUSet::All(
        param.gpu_id, param.n_gpus, dmat->Info().num_row_);
    ConfigureShards(devices);
    if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
      return;
    }
    this->InitOutPredictions(dmat->Info(), out_preds, model);
    int tree_end = ntree_limit * model.param.num_output_group;
    if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
      // 0 (or an over-large limit) means "use every tree".
      tree_end = static_cast<unsigned>(model.trees.size());
    }
    DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
  }

 protected:
  // Sizes and reshards out_preds, seeding it with the base margin (per-row
  // values if provided, otherwise the model's scalar base margin).
  void InitOutPredictions(const MetaInfo& info,
                          HostDeviceVector<bst_float>* out_preds,
                          const gbm::GBTreeModel& model) const {
    size_t n_classes = model.param.num_output_group;
    size_t n = n_classes * info.num_row_;
    const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
    out_preds->Reshard(GPUDistribution::Granular(devices_, n_classes));
    out_preds->Resize(n);
    if (base_margin.Size() != 0) {
      CHECK_EQ(out_preds->Size(), n);
      out_preds->Copy(base_margin);
    } else {
      out_preds->Fill(model.base_margin);
    }
  }

  // Copies cached predictions into out_preds when the cached entry covers
  // the requested tree range; returns true on a cache hit.
  bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
                        const gbm::GBTreeModel& model, unsigned ntree_limit) {
    if (ntree_limit == 0 ||
        ntree_limit * model.param.num_output_group >= model.trees.size()) {
      auto it = cache_.find(dmat);
      if (it != cache_.end()) {
        const HostDeviceVector<bst_float>& y = it->second.predictions;
        if (y.Size() != 0) {
          out_preds->Reshard(y.Distribution());
          out_preds->Resize(y.Size());
          out_preds->Copy(y);
          return true;
        }
      }
    }
    return false;
  }

  // Refreshes every cached prediction after num_new_trees were added,
  // preferring the updater's fast cache path when available.
  void UpdatePredictionCache(
      const gbm::GBTreeModel& model,
      std::vector<std::unique_ptr<TreeUpdater>>* updaters,
      int num_new_trees) override {
    auto old_ntree = model.trees.size() - num_new_trees;
    // update cache entry
    for (auto& kv : cache_) {
      PredictionCacheEntry& e = kv.second;
      DMatrix* dmat = kv.first;
      HostDeviceVector<bst_float>& predictions = e.predictions;
      if (predictions.Size() == 0) {
        this->InitOutPredictions(dmat->Info(), &predictions, model);
      }
      if (model.param.num_output_group == 1 && updaters->size() > 0 &&
          num_new_trees == 1 &&
          updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
        // do nothing
      } else {
        // Fall back: predict only the newly added trees on top of the cache.
        DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
      }
    }
  }

  // NOTE(review): ntree_limit is accepted but not forwarded to the CPU
  // predictor here — confirm this is intentional.
  void PredictInstance(const SparsePage::Inst& inst,
                       std::vector<bst_float>* out_preds,
                       const gbm::GBTreeModel& model, unsigned ntree_limit,
                       unsigned root_index) override {
    cpu_predictor->PredictInstance(inst, out_preds, model, root_index);
  }
  // Leaf-index prediction is delegated to the CPU implementation.
  void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
                   const gbm::GBTreeModel& model,
                   unsigned ntree_limit) override {
    cpu_predictor->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
  }
  // Feature-contribution (SHAP) prediction is delegated to the CPU
  // implementation.
  void PredictContribution(DMatrix* p_fmat,
                           std::vector<bst_float>* out_contribs,
                           const gbm::GBTreeModel& model, unsigned ntree_limit,
                           bool approximate, int condition,
                           unsigned condition_feature) override {
    cpu_predictor->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
                                       approximate, condition,
                                       condition_feature);
  }
  void PredictInteractionContributions(DMatrix* p_fmat,
                                       std::vector<bst_float>* out_contribs,
                                       const gbm::GBTreeModel& model,
                                       unsigned ntree_limit,
                                       bool approximate) override {
    cpu_predictor->PredictInteractionContributions(p_fmat, out_contribs, model,
                                                   ntree_limit, approximate);
  }

  // Parses configuration and (re)creates one shard per selected device.
  void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
            const std::vector<std::shared_ptr<DMatrix>>& cache) override {
    Predictor::Init(cfg, cache);
    cpu_predictor->Init(cfg, cache);
    param.InitAllowUnknown(cfg);
    GPUSet devices = GPUSet::All(param.gpu_id, param.n_gpus);
    ConfigureShards(devices);
  }

 private:
  /*! \brief Re configure shards when GPUSet is changed. */
  void ConfigureShards(GPUSet devices) {
    if (devices_ == devices) return;
    devices_ = devices;
    shards.clear();
    shards.resize(devices_.Size());
    dh::ExecuteIndexShards(&shards, [=](size_t i, DeviceShard& shard){
      shard.Init(devices_.DeviceId(i));
    });
  }

  GPUPredictionParam param;
  std::unique_ptr<Predictor> cpu_predictor;  // fallback for non-GPU paths
  std::vector<DeviceShard> shards;           // one per device in devices_
  GPUSet devices_;
};
// Register this implementation under the name "gpu_predictor" so it can
// be selected via the `predictor` training parameter.
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
|
0558f5050040cb4a9382de53674c1cedf053cc64.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i].
// Indexed by threadIdx.x only, so it must be launched as a single block
// with exactly one thread per element (see the <<<1, size>>> launch in
// addWithCuda). There is no bounds check; the launch configuration is
// the contract.
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
// Host-side demo (synchronous): adds two constant 5-element vectors on
// the GPU via addWithCuda() and prints the result.
// Returns 0 on success, 1 on any HIP error.
int main_kernel()
{
    static const int kN = 5;
    const int lhs[kN] = { 1, 2, 3, 4, 5 };
    const int rhs[kN] = { 10, 20, 30, 40, 50 };
    int sum[kN] = { 0 };

    // Run the element-wise addition on the device.
    hipError_t status = addWithCuda(sum, lhs, rhs, kN);
    if (status != hipSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
           sum[0], sum[1], sum[2], sum[3], sum[4]);

    // hipDeviceReset must be called before exiting in order for profiling
    // and tracing tools such as Nsight and Visual Profiler to show
    // complete traces.
    status = hipDeviceReset();
    if (status != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Allocates three device buffers of `size` ints, copies `a` and `b` to
// the GPU, launches addKernel with a single block of `size` threads,
// synchronizes, and copies the result back into `c`. Every exit path --
// success or failure -- falls through the Error: label, which releases
// all device buffers (hipFree on a null pointer is a no-op). Returns
// hipSuccess or the first failing status.
// NOTE(review): the single-block launch caps `size` at the device's max
// threads per block (typically 1024) -- confirm callers respect this.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Shared cleanup: reached on success and on every error path above.
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| 0558f5050040cb4a9382de53674c1cedf053cc64.cu |
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i].
// Expects a single-block launch with exactly one thread per element
// (see the <<<1, size>>> launch in addWithCuda); no bounds check.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
//sync
// Host-side demo (synchronous): adds two constant 5-element vectors on
// the GPU via addWithCuda() and prints the result.
// Returns 0 on success, 1 on any CUDA error.
int main_kernel()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Allocates three device buffers of `size` ints, copies `a` and `b` to
// the GPU, launches addKernel with a single block of `size` threads,
// synchronizes, and copies the result back into `c`. Every exit path --
// success or failure -- falls through the Error: label, which releases
// all device buffers (cudaFree on a null pointer is a no-op). Returns
// cudaSuccess or the first failing status.
// NOTE(review): the single-block launch caps `size` at the device's max
// threads per block (typically 1024) -- confirm callers respect this.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Shared cleanup: reached on success and on every error path above.
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
1e99f478994af56e3a8d7f8ee9a58aa9d5af15fc.hip | // !!! This is a file automatically generated by hipify!!!
/****
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****/
#include "mpi.h"
#include "mp.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include <string.h>
#include <stdio.h>
#include "assert.h"
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include "prof.h"
#include "hip/hip_runtime_api.h"
#define CUDA_CHECK(stmt) \
do { \
hipError_t result = (stmt); \
if (hipSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n", \
__FILE__, __LINE__,hipGetErrorString(result));\
exit(-1); \
} \
assert(hipSuccess == result); \
} while (0)
#define MP_CHECK(stmt) \
do { \
int result = (stmt); \
if (0 != result) { \
fprintf(stderr, "[%s:%d] mp call failed \n", \
__FILE__, __LINE__); \
exit(-1); \
} \
assert(0 == result); \
} while (0)
int enable_debug_prints = 0;
#define mp_dbg_msg(FMT, ARGS...) do \
{ \
if (enable_debug_prints) { \
fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \
fflush(stderr); \
} \
} while(0)
#define MAX_SIZE 128*1024
//64*1024
#define ITER_COUNT_SMALL 200
#define ITER_COUNT_LARGE 50
struct prof prof_normal;
struct prof prof_async;
int prof_start = 0;
int prof_idx = 0;
int comm_size, my_rank, peer;
int steps_per_batch = 16, batches_inflight = 4;
int enable_async = 1;
__device__ int counter;
__device__ int clockrate;
// Busy-wait kernel: spins on clock64() until roughly `time` microseconds
// have elapsed on the device. `clockrate` holds prop.clockRate (kHz,
// copied from the host in main), so ticks*1000/clockrate converts to
// microseconds. The store to the global `counter` keeps the loop from
// being optimized away.
__global__ void dummy_kernel(double time)
{
long long int start, stop;
double usec;
start = clock64();
do {
stop = clock64();
usec = ((double)(stop-start)*1000)/((double)clockrate);
counter = usec;
} while(usec < time);
}
/*application and pack buffers*/
void *buf = NULL, *sbuf_d = NULL, *rbuf_d = NULL;
int req_max_inflight = 0, rreq_max_inflight = 0, prepost_depth = 0;
hipStream_t stream;
size_t buf_size;
/*mp specific objects*/
mp_request_t *sreq = NULL;
mp_request_t *rreq = NULL;
mp_reg_t sreg, rreg;
double time_start, time_stop;
/*
 * Ping-pong exchange between the two ranks: `iter_count` iterations of
 * `size`-byte messages. Rank 0 sends then waits for the echo; rank 1
 * waits, optionally runs dummy_kernel for `kernel_time` usec, then
 * echoes. `use_async` selects the stream-ordered (mp_*_on_stream) path.
 * Receives are preposted up to `prepost_depth`; once req_max_inflight
 * requests are outstanding, a batch of `steps_per_batch` completions is
 * reaped. When `validate` is set, the receive buffer contents are
 * checked against the known fill pattern.
 *
 * Returns the measured one-way latency in microseconds.
 * Fix: the function was declared to return `int`, silently truncating
 * the computed double latency to whole microseconds; it now returns
 * `double` (all callers already store the result in a double).
 */
double sr_exchange (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, int use_async, struct prof *prof)
{
int i, j;
double latency;
double time_start, time_stop;
int req_idx = 0, rreq_idx = 0, complete_req_idx = 0, complete_rreq_idx = 0;
int req_inflight = 0, rreq_inflight = 0;
if (validate) {
mp_dbg_msg("initializing the buffer \n");
CUDA_CHECK(hipMemset(sbuf_d, (size + 1)%CHAR_MAX, buf_size));
CUDA_CHECK(hipMemset(rbuf_d, 0, buf_size));
CUDA_CHECK(hipDeviceSynchronize());
}
time_start = MPI_Wtime();
/* prepost receives so the peer's sends never block on a missing recv */
for (j=0; j<prepost_depth; j++) {
mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx);
MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*j), size, peer, &rreg, &rreq[rreq_idx]));
rreq_idx = (rreq_idx + 1)%rreq_max_inflight;
rreq_inflight++;
}
prof_idx = 0;
for (j = 0; j < iter_count; j++) {
mp_dbg_msg("[%d] iteration :%d \n", my_rank, j);
if (!my_rank) {
/* rank 0: send first, then wait for the echo */
if (prof) PROF(prof, prof_idx++);
req_idx = j%req_max_inflight;
mp_dbg_msg("[%d] posted send request: %d \n", my_rank, req_idx);
if (!use_async) {
MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx]));
} else {
MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream));
}
if (prof) PROF(prof, prof_idx++);
req_idx = j%rreq_max_inflight;
if (!use_async) {
MP_CHECK(mp_wait(&rreq[req_idx]));
} else {
MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream));
}
} else {
/* rank 1: wait for the message, optionally compute, then echo */
req_idx = j%rreq_max_inflight;
if (!use_async) {
MP_CHECK(mp_wait(&rreq[req_idx]));
} else {
MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream));
}
if (kernel_time > 0) {
hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_time);
if (!use_async) {
CUDA_CHECK(hipStreamSynchronize(stream));
}
}
req_idx = j%req_max_inflight;
mp_dbg_msg("[%d] posted send request: %d \n", my_rank, req_idx);
if (!use_async) {
MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx]));
} else {
MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream));
}
}
req_inflight++;
mp_dbg_msg("[%d] requests inflight: %d \n", my_rank, req_inflight);
if (!my_rank && prof) PROF(prof, prof_idx++);
/* keep the recv pipeline full */
if ((j + prepost_depth) < iter_count) {
mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx);
int buf_idx = (j + prepost_depth);
MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*buf_idx), size, peer, &rreg, &rreq[rreq_idx]));
rreq_idx = (rreq_idx + 1)%rreq_max_inflight;
rreq_inflight++;
}
if (!my_rank && prof) PROF(prof, prof_idx++);
/*synchronize on oldest batch*/
if (req_inflight == req_max_inflight) {
if (use_async) {
for (i=0; i<steps_per_batch; i++) {
mp_dbg_msg("[%d] waiting on recv request: %d \n", my_rank, complete_rreq_idx);
MP_CHECK(mp_wait(&rreq[complete_rreq_idx]));
mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx);
complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight;
rreq_inflight--;
}
}
mp_dbg_msg("[%d] after waiting on recv, rreq_inflight: %d \n", my_rank, rreq_inflight);
for (i=0; i<steps_per_batch; i++) {
mp_dbg_msg("[%d] waiting on send request: %d \n", my_rank, complete_req_idx);
MP_CHECK(mp_wait(&sreq[complete_req_idx]));
mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_req_idx);
complete_req_idx = (complete_req_idx + 1)%req_max_inflight;
req_inflight--;
}
mp_dbg_msg("[%d] after waiting on send, req_inflight: %d \n", my_rank, req_inflight);
}
/* drain every remaining request on the last iteration */
if (j == (iter_count - 1)) {
/*ideally, there should be validation here*/
if (use_async) {
while (rreq_inflight > 0) {
mp_wait(&rreq[complete_rreq_idx]);
mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx);
complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight;
rreq_inflight--;
}
}
while (req_inflight > 0) {
mp_wait(&sreq[complete_req_idx]);
mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_req_idx);
complete_req_idx = (complete_req_idx + 1)%req_max_inflight;
req_inflight--;
}
}
if (!my_rank && prof) {
PROF(prof, prof_idx++);
prof_update(prof);
prof_idx = 0;
}
}
if (validate) {
CUDA_CHECK(hipMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d),
buf_size, hipMemcpyDefault));
//CUDA_CHECK(hipDeviceSynchronize());
char *value = (char *)((uintptr_t)buf);
for (i=0; i<buf_size; i++) {
if (value[i] != (size + 1)%CHAR_MAX) {
mp_dbg_msg("[%d] validation check failed index: %d expected: %d actual: %d \n",
my_rank, i, (size + 1)%CHAR_MAX, value[i]);
exit(-1);
}
}
}
MPI_Barrier(comm);
time_stop = MPI_Wtime();
/* half round-trip time, in microseconds */
latency = (((time_stop - time_start)*1e6)/(iter_count*2));
CUDA_CHECK(hipDeviceSynchronize());
return latency;
}
/*
 * Benchmark driver: requires exactly two MPI ranks. For message sizes
 * 1..MAX_SIZE (powers of two) it measures ping-pong latency over four
 * configurations -- sync / sync+kernel / async / async+kernel -- via
 * sr_exchange(), printing one table row per size. Behavior is tuned by
 * environment variables: ENABLE_VALIDATION, ENABLE_DEBUG_MSG,
 * KRENEL_TIME (sic -- misspelled, kept for compatibility),
 * COMM_COMP_RATIO, SIZE.
 *
 * Fix: local_rank was read uninitialized when MV2_COMM_WORLD_LOCAL_RANK
 * is not exported by the launcher; it now defaults to 0 (device 0).
 */
int main (int c, char *v[])
{
int iter_count, size, dev_count, dev_id = 0;
int local_rank = 0; /* default when the launcher exports no local rank */
int kernel_time = 20;
int comm_comp_ratio = 0;
int validate = 0;
char *value = getenv("ENABLE_VALIDATION");
if (value != NULL) {
validate = atoi(value);
}
value = getenv("ENABLE_DEBUG_MSG");
if (value != NULL) {
enable_debug_prints = atoi(value);
}
/* NOTE: env var name is misspelled ("KRENEL_TIME"); kept as-is so
 * existing scripts keep working. */
value = getenv("KRENEL_TIME");
if (value != NULL) {
kernel_time = atoi(value);
}
value = getenv("COMM_COMP_RATIO");
if (value != NULL) {
comm_comp_ratio = atoi(value);
}
size = 0;
/* NOTE(review): SIZE is parsed but `size` is overwritten by the sweep
 * loop below, so this setting currently has no effect. */
value = getenv("SIZE");
if (value != NULL && atoi(value)) {
size = atoi(value);
}
MPI_Init(&c, &v);
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
if (comm_size != 2) {
fprintf(stderr, "this test requires exactly two processes \n");
exit(-1);
}
CUDA_CHECK(hipGetDeviceCount(&dev_count));
if (dev_count <= 0) {
fprintf(stderr, "no CUDA devices found \n");
exit(-1);
}
if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) {
local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK"));
}
dev_id = local_rank%dev_count;
fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d \n", my_rank, local_rank, dev_count, dev_id);
CUDA_CHECK(hipSetDevice(dev_id));
CUDA_CHECK(hipFree(0));
/* publish the device clock rate (kHz) for dummy_kernel's busy wait */
hipDeviceProp_t prop;
CUDA_CHECK(hipGetDeviceProperties(&prop, dev_id));
CUDA_CHECK(hipMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, hipMemcpyHostToDevice));
peer = !my_rank;
MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id));
iter_count = ITER_COUNT_SMALL;
if (!my_rank) {
fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n",
steps_per_batch, batches_inflight);
}
prepost_depth = (steps_per_batch < iter_count) ? steps_per_batch : iter_count;
req_max_inflight = steps_per_batch*batches_inflight;
rreq_max_inflight = (steps_per_batch*batches_inflight + prepost_depth);
/*allocating requests*/
sreq = (mp_request_t *) malloc(req_max_inflight*sizeof(mp_request_t));
rreq = (mp_request_t *) malloc(rreq_max_inflight*sizeof(mp_request_t));
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
if (!my_rank) fprintf(stdout, "%10s %10s %10s %10s %10s %10s \n", "Size", "KernelTime", "No-asyncl", "No-async+Kernel", "Async", "Async+Kernel");
for (size=1; size<=MAX_SIZE; size*=2)
{
double latency;
char *tags = "kernel|send|recv|prepost|wait|";
if (size > 1024) {
iter_count = ITER_COUNT_LARGE;
}
buf_size = size*iter_count;
buf = malloc (buf_size);
memset(buf, 0, buf_size);
CUDA_CHECK(hipMalloc((void **)&sbuf_d, buf_size));
CUDA_CHECK(hipMemset(sbuf_d, 0, buf_size));
CUDA_CHECK(hipMalloc((void **)&rbuf_d, buf_size));
CUDA_CHECK(hipMemset(rbuf_d, 0, buf_size));
MP_CHECK(mp_register(sbuf_d, buf_size, &sreg));
MP_CHECK(mp_register(rbuf_d, buf_size, &rreg));
if (!my_rank) {
if (prof_init(&prof_normal, 1000, 1000, "1us", 100, 1, tags)) {
fprintf(stderr, "error in prof_init init.\n");
exit(-1);
}
if (prof_init(&prof_async, 1000, 1000, "1us", 100, 1, tags)) {
fprintf(stderr, "error in prof_init init.\n");
exit(-1);
}
prof_start = 1;
}
if (!my_rank) fprintf(stdout, "%10d", size);
/*warmup*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/);
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/);
/*Normal*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/);
/* size the interleaved kernel relative to the measured latency */
kernel_time = (comm_comp_ratio > 0) ? comm_comp_ratio*latency : kernel_time;
if (!my_rank) fprintf(stdout, "\t %10d", kernel_time);
if (!my_rank) fprintf(stdout, "\t %8.2lf", latency);
hipProfilerStart();
/*Normal + Kernel*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 0/*use_async*/, &prof_normal/*prof*/);
if (!my_rank) fprintf(stdout, "\t %8.2lf", latency);
hipProfilerStop();
/*Async*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/);
if (!my_rank) fprintf(stdout, "\t %8.2lf", latency);
hipProfilerStart();
/*Async + Kernel*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 1/*use_async*/, &prof_async/*prof*/);
if (!my_rank) fprintf(stdout, "\t %8.2lf \n", latency);
hipProfilerStop();
if (!my_rank && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size);
if (!my_rank) {
prof_dump(&prof_normal);
prof_dump(&prof_async);
}
mp_deregister(&sreg);
mp_deregister(&rreg);
CUDA_CHECK(hipFree(sbuf_d));
CUDA_CHECK(hipFree(rbuf_d));
free(buf);
}
CUDA_CHECK(hipStreamDestroy(stream));
free(sreq);
free(rreq);
mp_finalize ();
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
| 1e99f478994af56e3a8d7f8ee9a58aa9d5af15fc.cu | /****
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****/
#include "mpi.h"
#include "mp.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include <string.h>
#include <stdio.h>
#include "assert.h"
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include "prof.h"
#include "cuda_profiler_api.h"
#define CUDA_CHECK(stmt) \
do { \
cudaError_t result = (stmt); \
if (cudaSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n", \
__FILE__, __LINE__,cudaGetErrorString(result));\
exit(-1); \
} \
assert(cudaSuccess == result); \
} while (0)
#define MP_CHECK(stmt) \
do { \
int result = (stmt); \
if (0 != result) { \
fprintf(stderr, "[%s:%d] mp call failed \n", \
__FILE__, __LINE__); \
exit(-1); \
} \
assert(0 == result); \
} while (0)
int enable_debug_prints = 0;
#define mp_dbg_msg(FMT, ARGS...) do \
{ \
if (enable_debug_prints) { \
fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \
fflush(stderr); \
} \
} while(0)
#define MAX_SIZE 128*1024
//64*1024
#define ITER_COUNT_SMALL 200
#define ITER_COUNT_LARGE 50
struct prof prof_normal;
struct prof prof_async;
int prof_start = 0;
int prof_idx = 0;
int comm_size, my_rank, peer;
int steps_per_batch = 16, batches_inflight = 4;
int enable_async = 1;
__device__ int counter;
__device__ int clockrate;
// Busy-wait kernel: spins on clock64() until roughly `time` microseconds
// have elapsed on the device. `clockrate` holds prop.clockRate (kHz,
// copied from the host in main), so ticks*1000/clockrate converts to
// microseconds. The store to the global `counter` keeps the loop from
// being optimized away.
__global__ void dummy_kernel(double time)
{
long long int start, stop;
double usec;
start = clock64();
do {
stop = clock64();
usec = ((double)(stop-start)*1000)/((double)clockrate);
counter = usec;
} while(usec < time);
}
/*application and pack buffers*/
void *buf = NULL, *sbuf_d = NULL, *rbuf_d = NULL;
int req_max_inflight = 0, rreq_max_inflight = 0, prepost_depth = 0;
cudaStream_t stream;
size_t buf_size;
/*mp specific objects*/
mp_request_t *sreq = NULL;
mp_request_t *rreq = NULL;
mp_reg_t sreg, rreg;
double time_start, time_stop;
/*
 * Ping-pong exchange between the two ranks: `iter_count` iterations of
 * `size`-byte messages. Rank 0 sends then waits for the echo; rank 1
 * waits, optionally runs dummy_kernel for `kernel_time` usec, then
 * echoes. `use_async` selects the stream-ordered (mp_*_on_stream) path.
 * Receives are preposted up to `prepost_depth`; once req_max_inflight
 * requests are outstanding, a batch of `steps_per_batch` completions is
 * reaped. When `validate` is set, the receive buffer contents are
 * checked against the known fill pattern.
 *
 * Returns the measured one-way latency in microseconds.
 * Fix: the function was declared to return `int`, silently truncating
 * the computed double latency to whole microseconds; it now returns
 * `double` (all callers already store the result in a double).
 */
double sr_exchange (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, int use_async, struct prof *prof)
{
int i, j;
double latency;
double time_start, time_stop;
int req_idx = 0, rreq_idx = 0, complete_req_idx = 0, complete_rreq_idx = 0;
int req_inflight = 0, rreq_inflight = 0;
if (validate) {
mp_dbg_msg("initializing the buffer \n");
CUDA_CHECK(cudaMemset(sbuf_d, (size + 1)%CHAR_MAX, buf_size));
CUDA_CHECK(cudaMemset(rbuf_d, 0, buf_size));
CUDA_CHECK(cudaDeviceSynchronize());
}
time_start = MPI_Wtime();
/* prepost receives so the peer's sends never block on a missing recv */
for (j=0; j<prepost_depth; j++) {
mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx);
MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*j), size, peer, &rreg, &rreq[rreq_idx]));
rreq_idx = (rreq_idx + 1)%rreq_max_inflight;
rreq_inflight++;
}
prof_idx = 0;
for (j = 0; j < iter_count; j++) {
mp_dbg_msg("[%d] iteration :%d \n", my_rank, j);
if (!my_rank) {
/* rank 0: send first, then wait for the echo */
if (prof) PROF(prof, prof_idx++);
req_idx = j%req_max_inflight;
mp_dbg_msg("[%d] posted send request: %d \n", my_rank, req_idx);
if (!use_async) {
MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx]));
} else {
MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream));
}
if (prof) PROF(prof, prof_idx++);
req_idx = j%rreq_max_inflight;
if (!use_async) {
MP_CHECK(mp_wait(&rreq[req_idx]));
} else {
MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream));
}
} else {
/* rank 1: wait for the message, optionally compute, then echo */
req_idx = j%rreq_max_inflight;
if (!use_async) {
MP_CHECK(mp_wait(&rreq[req_idx]));
} else {
MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream));
}
if (kernel_time > 0) {
dummy_kernel <<<1, 1, 0, stream>>> (kernel_time);
if (!use_async) {
CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
req_idx = j%req_max_inflight;
mp_dbg_msg("[%d] posted send request: %d \n", my_rank, req_idx);
if (!use_async) {
MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx]));
} else {
MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream));
}
}
req_inflight++;
mp_dbg_msg("[%d] requests inflight: %d \n", my_rank, req_inflight);
if (!my_rank && prof) PROF(prof, prof_idx++);
/* keep the recv pipeline full */
if ((j + prepost_depth) < iter_count) {
mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx);
int buf_idx = (j + prepost_depth);
MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*buf_idx), size, peer, &rreg, &rreq[rreq_idx]));
rreq_idx = (rreq_idx + 1)%rreq_max_inflight;
rreq_inflight++;
}
if (!my_rank && prof) PROF(prof, prof_idx++);
/*synchronize on oldest batch*/
if (req_inflight == req_max_inflight) {
if (use_async) {
for (i=0; i<steps_per_batch; i++) {
mp_dbg_msg("[%d] waiting on recv request: %d \n", my_rank, complete_rreq_idx);
MP_CHECK(mp_wait(&rreq[complete_rreq_idx]));
mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx);
complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight;
rreq_inflight--;
}
}
mp_dbg_msg("[%d] after waiting on recv, rreq_inflight: %d \n", my_rank, rreq_inflight);
for (i=0; i<steps_per_batch; i++) {
mp_dbg_msg("[%d] waiting on send request: %d \n", my_rank, complete_req_idx);
MP_CHECK(mp_wait(&sreq[complete_req_idx]));
mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_req_idx);
complete_req_idx = (complete_req_idx + 1)%req_max_inflight;
req_inflight--;
}
mp_dbg_msg("[%d] after waiting on send, req_inflight: %d \n", my_rank, req_inflight);
}
/* drain every remaining request on the last iteration */
if (j == (iter_count - 1)) {
/*ideally, there should be validation here*/
if (use_async) {
while (rreq_inflight > 0) {
mp_wait(&rreq[complete_rreq_idx]);
mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx);
complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight;
rreq_inflight--;
}
}
while (req_inflight > 0) {
mp_wait(&sreq[complete_req_idx]);
mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_req_idx);
complete_req_idx = (complete_req_idx + 1)%req_max_inflight;
req_inflight--;
}
}
if (!my_rank && prof) {
PROF(prof, prof_idx++);
prof_update(prof);
prof_idx = 0;
}
}
if (validate) {
CUDA_CHECK(cudaMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d),
buf_size, cudaMemcpyDefault));
//CUDA_CHECK(cudaDeviceSynchronize());
char *value = (char *)((uintptr_t)buf);
for (i=0; i<buf_size; i++) {
if (value[i] != (size + 1)%CHAR_MAX) {
mp_dbg_msg("[%d] validation check failed index: %d expected: %d actual: %d \n",
my_rank, i, (size + 1)%CHAR_MAX, value[i]);
exit(-1);
}
}
}
MPI_Barrier(comm);
time_stop = MPI_Wtime();
/* half round-trip time, in microseconds */
latency = (((time_stop - time_start)*1e6)/(iter_count*2));
CUDA_CHECK(cudaDeviceSynchronize());
return latency;
}
/*
 * Benchmark driver: requires exactly two MPI ranks. For message sizes
 * 1..MAX_SIZE (powers of two) it measures ping-pong latency over four
 * configurations -- sync / sync+kernel / async / async+kernel -- via
 * sr_exchange(), printing one table row per size. Behavior is tuned by
 * environment variables: ENABLE_VALIDATION, ENABLE_DEBUG_MSG,
 * KRENEL_TIME (sic -- misspelled, kept for compatibility),
 * COMM_COMP_RATIO, SIZE.
 *
 * Fix: local_rank was read uninitialized when MV2_COMM_WORLD_LOCAL_RANK
 * is not exported by the launcher; it now defaults to 0 (device 0).
 */
int main (int c, char *v[])
{
int iter_count, size, dev_count, dev_id = 0;
int local_rank = 0; /* default when the launcher exports no local rank */
int kernel_time = 20;
int comm_comp_ratio = 0;
int validate = 0;
char *value = getenv("ENABLE_VALIDATION");
if (value != NULL) {
validate = atoi(value);
}
value = getenv("ENABLE_DEBUG_MSG");
if (value != NULL) {
enable_debug_prints = atoi(value);
}
/* NOTE: env var name is misspelled ("KRENEL_TIME"); kept as-is so
 * existing scripts keep working. */
value = getenv("KRENEL_TIME");
if (value != NULL) {
kernel_time = atoi(value);
}
value = getenv("COMM_COMP_RATIO");
if (value != NULL) {
comm_comp_ratio = atoi(value);
}
size = 0;
/* NOTE(review): SIZE is parsed but `size` is overwritten by the sweep
 * loop below, so this setting currently has no effect. */
value = getenv("SIZE");
if (value != NULL && atoi(value)) {
size = atoi(value);
}
MPI_Init(&c, &v);
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
if (comm_size != 2) {
fprintf(stderr, "this test requires exactly two processes \n");
exit(-1);
}
CUDA_CHECK(cudaGetDeviceCount(&dev_count));
if (dev_count <= 0) {
fprintf(stderr, "no CUDA devices found \n");
exit(-1);
}
if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) {
local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK"));
}
dev_id = local_rank%dev_count;
fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d \n", my_rank, local_rank, dev_count, dev_id);
CUDA_CHECK(cudaSetDevice(dev_id));
CUDA_CHECK(cudaFree(0));
/* publish the device clock rate (kHz) for dummy_kernel's busy wait */
cudaDeviceProp prop;
CUDA_CHECK(cudaGetDeviceProperties(&prop, dev_id));
CUDA_CHECK(cudaMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, cudaMemcpyHostToDevice));
peer = !my_rank;
MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id));
iter_count = ITER_COUNT_SMALL;
if (!my_rank) {
fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n",
steps_per_batch, batches_inflight);
}
prepost_depth = (steps_per_batch < iter_count) ? steps_per_batch : iter_count;
req_max_inflight = steps_per_batch*batches_inflight;
rreq_max_inflight = (steps_per_batch*batches_inflight + prepost_depth);
/*allocating requests*/
sreq = (mp_request_t *) malloc(req_max_inflight*sizeof(mp_request_t));
rreq = (mp_request_t *) malloc(rreq_max_inflight*sizeof(mp_request_t));
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
if (!my_rank) fprintf(stdout, "%10s %10s %10s %10s %10s %10s \n", "Size", "KernelTime", "No-asyncl", "No-async+Kernel", "Async", "Async+Kernel");
for (size=1; size<=MAX_SIZE; size*=2)
{
double latency;
char *tags = "kernel|send|recv|prepost|wait|";
if (size > 1024) {
iter_count = ITER_COUNT_LARGE;
}
buf_size = size*iter_count;
buf = malloc (buf_size);
memset(buf, 0, buf_size);
CUDA_CHECK(cudaMalloc((void **)&sbuf_d, buf_size));
CUDA_CHECK(cudaMemset(sbuf_d, 0, buf_size));
CUDA_CHECK(cudaMalloc((void **)&rbuf_d, buf_size));
CUDA_CHECK(cudaMemset(rbuf_d, 0, buf_size));
MP_CHECK(mp_register(sbuf_d, buf_size, &sreg));
MP_CHECK(mp_register(rbuf_d, buf_size, &rreg));
if (!my_rank) {
if (prof_init(&prof_normal, 1000, 1000, "1us", 100, 1, tags)) {
fprintf(stderr, "error in prof_init init.\n");
exit(-1);
}
if (prof_init(&prof_async, 1000, 1000, "1us", 100, 1, tags)) {
fprintf(stderr, "error in prof_init init.\n");
exit(-1);
}
prof_start = 1;
}
if (!my_rank) fprintf(stdout, "%10d", size);
/*warmup*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/);
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/);
/*Normal*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/);
/* size the interleaved kernel relative to the measured latency */
kernel_time = (comm_comp_ratio > 0) ? comm_comp_ratio*latency : kernel_time;
if (!my_rank) fprintf(stdout, "\t %10d", kernel_time);
if (!my_rank) fprintf(stdout, "\t %8.2lf", latency);
cudaProfilerStart();
/*Normal + Kernel*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 0/*use_async*/, &prof_normal/*prof*/);
if (!my_rank) fprintf(stdout, "\t %8.2lf", latency);
cudaProfilerStop();
/*Async*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/);
if (!my_rank) fprintf(stdout, "\t %8.2lf", latency);
cudaProfilerStart();
/*Async + Kernel*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 1/*use_async*/, &prof_async/*prof*/);
if (!my_rank) fprintf(stdout, "\t %8.2lf \n", latency);
cudaProfilerStop();
if (!my_rank && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size);
if (!my_rank) {
prof_dump(&prof_normal);
prof_dump(&prof_async);
}
mp_deregister(&sreg);
mp_deregister(&rreg);
CUDA_CHECK(cudaFree(sbuf_d));
CUDA_CHECK(cudaFree(rbuf_d));
free(buf);
}
CUDA_CHECK(cudaStreamDestroy(stream));
free(sreq);
free(rreq);
mp_finalize ();
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
|
1b6f0262a8132a28b2c901428b67ad5e9fb732d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <torch/library.h>
// Copied and adapted from
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/KernelUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
// Below is experimental temporary code before merging it to PyTorch
namespace at {
namespace native {
namespace internal_upsample {
__device__ __forceinline__ size_t
idx(const size_t nc,
const size_t height,
const size_t width,
const size_t y,
const size_t x) {
return (nc * height + y) * width + x;
}
// taken from
// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/
// src/libImaging/Resample.c#L20-L29
template <typename accscalar_t>
__device__ __forceinline__ static accscalar_t bilinear_filter(accscalar_t x) {
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return static_cast<accscalar_t>(1.0) - x;
}
return static_cast<accscalar_t>(0.0);
}
// taken from
// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/
// src/libImaging/Resample.c#L46-L62
template <typename accscalar_t>
__device__ __forceinline__ static accscalar_t bicubic_filter(accscalar_t x) {
// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm
#define a -0.5
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return ((a + 2.0) * x - (a + 3.0)) * x * x + static_cast<accscalar_t>(1.0);
}
if (x < 2.0) {
return (((x - 5) * x + 8) * x - 4) * a;
}
return static_cast<accscalar_t>(0.0);
#undef a
}
template <typename scalar_t, typename accscalar_t, typename filter_fn_t>
__device__ __forceinline__ static void _compute_weights(
const int i,
const int input_size,
const accscalar_t scale,
const accscalar_t support,
scalar_t* wt_ptr,
int interp_size,
filter_fn_t filter_fn,
int& xmin,
int& xmax) {
accscalar_t invscale = (scale >= 1.0) ? 1.0 / scale : 1.0;
accscalar_t center = scale * (i + 0.5);
xmin = max(static_cast<int>(center - support + 0.5), static_cast<int>(0));
xmax = min(static_cast<int>(center + support + 0.5), input_size) - xmin;
accscalar_t total_w = 0.0;
int j = 0;
for (j = 0; j < xmax; j++) {
accscalar_t w = filter_fn((j + xmin - center + 0.5) * invscale);
wt_ptr[j] = static_cast<scalar_t>(w);
total_w += w;
}
for (j = 0; j < xmax; j++) {
if (total_w != 0.0) {
wt_ptr[j] /= total_w;
}
}
for (; j < interp_size; j++) {
wt_ptr[j] = static_cast<scalar_t>(0.0);
}
}
template <typename scalar_t, typename accscalar_t>
__device__ __forceinline__ static accscalar_t interpolate_aa_single_dim(
scalar_t* src,
scalar_t* weights,
int64_t size) {
scalar_t t = static_cast<accscalar_t>(*src);
scalar_t wts = static_cast<accscalar_t>(weights[0]);
accscalar_t output = t * wts;
int64_t j = 1;
for (; j < size; j++) {
wts = static_cast<accscalar_t>(weights[j]);
t = static_cast<accscalar_t>(*(src + j));
output += t * wts;
}
return output;
}
template <typename scalar_t, typename accscalar_t, int interp_size>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_gen2d_out_frame(
const int n,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int height1 = idata.size(2);
const int width1 = idata.size(3);
const int height2 = odata.size(2);
const int width2 = odata.size(3);
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][h1][w1];
odata[n][c][h2][w2] = val;
}
}
return;
}
const accscalar_t support_h = static_cast<accscalar_t>(
(rheight >= 1.0) ? (interp_size * 0.5) * rheight : interp_size * 0.5);
const accscalar_t support_w = static_cast<accscalar_t>(
(rwidth >= 1.0) ? (interp_size * 0.5) * rwidth : interp_size * 0.5);
const int interp_height = (int)ceilf(support_h) * 2 + 1;
const int interp_width = (int)ceilf(support_w) * 2 + 1;
// Setup local buffers
// TODO: maybe we can specify dynamic shared memory size before calling the
// cuda code, however we should then ensure that device has enough shared
// memory
scalar_t wx[256];
scalar_t wy[256];
scalar_t buffer1[256];
scalar_t buffer2[256];
// Compute weights
int xmin, xsize, ymin, ysize;
typedef scalar_t (*filter_fn_t)(scalar_t);
filter_fn_t filter_fn;
if (interp_size == 2) {
filter_fn = bilinear_filter;
} else if (interp_size == 4) {
filter_fn = bicubic_filter;
}
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
w2,
width1,
rwidth,
support_w,
wx,
interp_width,
filter_fn,
xmin,
xsize);
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
h2,
height1,
rheight,
support_h,
wy,
interp_height,
filter_fn,
ymin,
ysize);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
// interpolate on x-axis for ymin to ymin + ysize
for (int y = 0; y < ysize; y++) {
// copy data into the local buffer and use
// interpolate_aa_single_dim method
for (int x = 0; x < xsize; x++) {
buffer1[x] = idata[n][c][ymin + y][xmin + x];
}
buffer2[y] = static_cast<scalar_t>(
interpolate_aa_single_dim<scalar_t, accscalar_t>(
buffer1, wx, xsize));
}
odata[n][c][h2][w2] = static_cast<scalar_t>(
interpolate_aa_single_dim<scalar_t, accscalar_t>(
buffer2, wy, ysize));
}
}
}
}
template <int interp_size>
static void upsample_gen2d_out_cuda_template(
const Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// Copied and adapted from
// UpSampleBicubic2d.cu::upsample_bicubic2d_out_cuda_template
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_gen2d_out_cuda", {input_arg, output_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
const int num_kernels = output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_gen2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
// We are using static buffer memory of 256 * sizeof(float) per thread
// to store weights. Size of weights array is
// interp_size = scale * 2 + 1 for bilinear mode
TORCH_CHECK(
rheight < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
TORCH_CHECK(
rwidth < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
hipLaunchKernelGGL(( upsample_gen2d_out_frame<scalar_t, accscalar_t, interp_size>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream,
num_kernels, rheight, rwidth, align_corners, idata, odata);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t, int interp_size>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_gen2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
// special case: output just copy
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
const accscalar_t support_h = static_cast<accscalar_t>(
(height_scale >= 1.0) ? (interp_size * 0.5) * height_scale
: interp_size * 0.5);
const accscalar_t support_w = static_cast<accscalar_t>(
(width_scale >= 1.0) ? (interp_size * 0.5) * width_scale
: interp_size * 0.5);
const int interp_height = (int)ceilf(support_h) * 2 + 1;
const int interp_width = (int)ceilf(support_w) * 2 + 1;
// Setup local buffers
// TODO: maybe we can specify dynamic shared memory size before calling the
// cuda code, however we should then ensure that device has enough shared
// memory
scalar_t wx[256];
scalar_t wy[256];
// Compute weights
int xmin, xsize, ymin, ysize;
typedef scalar_t (*filter_fn_t)(scalar_t);
filter_fn_t filter_fn;
if (interp_size == 2) {
filter_fn = bilinear_filter;
} else if (interp_size == 4) {
filter_fn = bicubic_filter;
}
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
output_x,
input_width,
width_scale,
support_w,
wx,
interp_width,
filter_fn,
xmin,
xsize);
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
output_y,
input_height,
height_scale,
support_h,
wy,
interp_height,
filter_fn,
ymin,
ysize);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int y = 0; y < ysize; y++) {
for (int x = 0; x < xsize; x++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
ymin + y,
xmin + x,
wx[x] * wy[y] * out_value);
}
}
}
}
}
template <int interp_size>
static void upsample_gen2d_backward_out_cuda_template(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// Copied and adapted from
// UpSampleBicubic2d.cu::upsample_bicubic2d_backward_out_cuda_template
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_gen2d_backward_out_cuda", {grad_output_arg, grad_input_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
Tensor grad_output = grad_output_.contiguous();
grad_input.zero_();
const int num_kernels = output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_gen2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
// We are using static buffer memory of 256 * sizeof(float) per thread
// to store weights. Size of weights array is
// interp_size = scale * 2 + 1 for bilinear mode
TORCH_CHECK(
rheight < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
TORCH_CHECK(
rwidth < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
hipLaunchKernelGGL(( upsample_gen2d_backward_out_frame<scalar_t, accscalar_t, interp_size>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream,
num_kernels, rheight, rwidth, align_corners, idata, odata);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} // namespace internal_upsample
} // namespace native
} // namespace at
namespace vision {
namespace ops {
namespace {
// Copied from "UpSample.h" as we can not use UpSample.h with UpSample.cuh
static std::array<int64_t, 4> upsample_2d_common_check(
at::IntArrayRef input_size,
at::IntArrayRef output_size) {
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int64_t output_height = output_size[0];
int64_t output_width = output_size[1];
int64_t nbatch = input_size[0];
int64_t channels = input_size[1];
int64_t input_height = input_size[2];
int64_t input_width = input_size[3];
TORCH_CHECK(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0,
"Input and output sizes should be greater than 0,"
" but got input (H: ",
input_height,
", W: ",
input_width,
") output (H: ",
output_height,
", W: ",
output_width,
")");
return {nbatch, channels, output_height, output_width};
}
template <int interp_size>
at::Tensor interpolate_gen2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
c10::optional<c10::ArrayRef<double>> scale_factors = {};
// Copied from UpSampleBilinear2d.cpp
auto output = at::empty({0}, input.options());
auto osize = at::native::upsample::compute_output_size(
input.sizes(), output_size, scale_factors);
auto scale_h = at::native::upsample_cuda::get_scale_value(scale_factors, 0);
auto scale_w = at::native::upsample_cuda::get_scale_value(scale_factors, 1);
auto full_output_size = upsample_2d_common_check(input.sizes(), osize);
// Allow for empty batch size but not other dimensions
TORCH_CHECK(
input.numel() != 0 ||
c10::multiply_integers(
input.sizes().begin() + 1, input.sizes().end()),
"Non-empty 4D data tensor expected but got a tensor with sizes ",
input.sizes());
output.resize_(full_output_size, input.suggest_memory_format());
at::native::internal_upsample::upsample_gen2d_out_cuda_template<interp_size>(
output,
input,
{full_output_size[2], full_output_size[3]},
align_corners,
scale_h,
scale_w);
return output;
}
template <int interp_size>
at::Tensor interpolate_gen2d_aa_backward_kernel(
const at::Tensor& grad_output,
at::IntArrayRef output_size,
at::IntArrayRef input_size,
bool align_corners) {
c10::optional<c10::ArrayRef<double>> scale_factors = {};
// Copied from UpSampleBicubic2d.cpp::upsample_bicubic2d_backward
auto grad_input = at::empty({0}, grad_output.options());
auto osize = at::native::upsample::compute_output_size(
input_size, output_size, scale_factors);
auto scale_h = at::native::upsample_cuda::get_scale_value(scale_factors, 0);
auto scale_w = at::native::upsample_cuda::get_scale_value(scale_factors, 1);
auto full_output_size = upsample_2d_common_check(input_size, osize);
TORCH_CHECK(
grad_output.dim() == 4,
"Expected grad_output to be a tensor of dimension 4 but got: dimension ",
grad_output.dim());
for (int i = 0; i < 4; ++i) {
TORCH_CHECK(
grad_output.size(i) == full_output_size[i],
"Expected grad_output to have the same shape as output;",
" output.size(",
i,
") = ",
full_output_size[i],
" but got grad_output.size(",
i,
") = ",
grad_output.size(i));
}
grad_input.resize_(input_size, grad_output.suggest_memory_format());
at::native::internal_upsample::upsample_gen2d_backward_out_cuda_template<
interp_size>(
grad_input,
grad_output,
{full_output_size[2], full_output_size[3]},
input_size,
align_corners,
scale_h,
scale_w);
return grad_input;
}
at::Tensor interpolate_bilinear2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
return interpolate_gen2d_aa_forward_kernel<2>(
input, output_size, align_corners);
}
at::Tensor interpolate_bicubic2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
return interpolate_gen2d_aa_forward_kernel<4>(
input, output_size, align_corners);
}
at::Tensor interpolate_bilinear2d_aa_backward_kernel(
const at::Tensor& grad_output,
at::IntArrayRef output_size,
at::IntArrayRef input_size,
bool align_corners) {
return interpolate_gen2d_aa_backward_kernel<2>(
grad_output, output_size, input_size, align_corners);
}
at::Tensor interpolate_bicubic2d_aa_backward_kernel(
const at::Tensor& grad_output,
at::IntArrayRef output_size,
at::IntArrayRef input_size,
bool align_corners) {
return interpolate_gen2d_aa_backward_kernel<4>(
grad_output, output_size, input_size, align_corners);
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bilinear2d_aa"),
TORCH_FN(interpolate_bilinear2d_aa_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bicubic2d_aa"),
TORCH_FN(interpolate_bicubic2d_aa_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bilinear2d_aa_backward"),
TORCH_FN(interpolate_bilinear2d_aa_backward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bicubic2d_aa_backward"),
TORCH_FN(interpolate_bicubic2d_aa_backward_kernel));
}
} // namespace ops
} // namespace vision
| 1b6f0262a8132a28b2c901428b67ad5e9fb732d9.cu | #include <torch/library.h>
// Copied and adapted from
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
// Below is experimental temporary code before merging it to PyTorch
namespace at {
namespace native {
namespace internal_upsample {
__device__ __forceinline__ size_t
idx(const size_t nc,
const size_t height,
const size_t width,
const size_t y,
const size_t x) {
return (nc * height + y) * width + x;
}
// taken from
// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/
// src/libImaging/Resample.c#L20-L29
template <typename accscalar_t>
__device__ __forceinline__ static accscalar_t bilinear_filter(accscalar_t x) {
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return static_cast<accscalar_t>(1.0) - x;
}
return static_cast<accscalar_t>(0.0);
}
// taken from
// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/
// src/libImaging/Resample.c#L46-L62
template <typename accscalar_t>
__device__ __forceinline__ static accscalar_t bicubic_filter(accscalar_t x) {
// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm
#define a -0.5
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return ((a + 2.0) * x - (a + 3.0)) * x * x + static_cast<accscalar_t>(1.0);
}
if (x < 2.0) {
return (((x - 5) * x + 8) * x - 4) * a;
}
return static_cast<accscalar_t>(0.0);
#undef a
}
template <typename scalar_t, typename accscalar_t, typename filter_fn_t>
__device__ __forceinline__ static void _compute_weights(
const int i,
const int input_size,
const accscalar_t scale,
const accscalar_t support,
scalar_t* wt_ptr,
int interp_size,
filter_fn_t filter_fn,
int& xmin,
int& xmax) {
accscalar_t invscale = (scale >= 1.0) ? 1.0 / scale : 1.0;
accscalar_t center = scale * (i + 0.5);
xmin = max(static_cast<int>(center - support + 0.5), static_cast<int>(0));
xmax = min(static_cast<int>(center + support + 0.5), input_size) - xmin;
accscalar_t total_w = 0.0;
int j = 0;
for (j = 0; j < xmax; j++) {
accscalar_t w = filter_fn((j + xmin - center + 0.5) * invscale);
wt_ptr[j] = static_cast<scalar_t>(w);
total_w += w;
}
for (j = 0; j < xmax; j++) {
if (total_w != 0.0) {
wt_ptr[j] /= total_w;
}
}
for (; j < interp_size; j++) {
wt_ptr[j] = static_cast<scalar_t>(0.0);
}
}
template <typename scalar_t, typename accscalar_t>
__device__ __forceinline__ static accscalar_t interpolate_aa_single_dim(
scalar_t* src,
scalar_t* weights,
int64_t size) {
scalar_t t = static_cast<accscalar_t>(*src);
scalar_t wts = static_cast<accscalar_t>(weights[0]);
accscalar_t output = t * wts;
int64_t j = 1;
for (; j < size; j++) {
wts = static_cast<accscalar_t>(weights[j]);
t = static_cast<accscalar_t>(*(src + j));
output += t * wts;
}
return output;
}
template <typename scalar_t, typename accscalar_t, int interp_size>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_gen2d_out_frame(
const int n,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int height1 = idata.size(2);
const int width1 = idata.size(3);
const int height2 = odata.size(2);
const int width2 = odata.size(3);
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][h1][w1];
odata[n][c][h2][w2] = val;
}
}
return;
}
const accscalar_t support_h = static_cast<accscalar_t>(
(rheight >= 1.0) ? (interp_size * 0.5) * rheight : interp_size * 0.5);
const accscalar_t support_w = static_cast<accscalar_t>(
(rwidth >= 1.0) ? (interp_size * 0.5) * rwidth : interp_size * 0.5);
const int interp_height = (int)ceilf(support_h) * 2 + 1;
const int interp_width = (int)ceilf(support_w) * 2 + 1;
// Setup local buffers
// TODO: maybe we can specify dynamic shared memory size before calling the
// cuda code, however we should then ensure that device has enough shared
// memory
scalar_t wx[256];
scalar_t wy[256];
scalar_t buffer1[256];
scalar_t buffer2[256];
// Compute weights
int xmin, xsize, ymin, ysize;
typedef scalar_t (*filter_fn_t)(scalar_t);
filter_fn_t filter_fn;
if (interp_size == 2) {
filter_fn = bilinear_filter;
} else if (interp_size == 4) {
filter_fn = bicubic_filter;
}
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
w2,
width1,
rwidth,
support_w,
wx,
interp_width,
filter_fn,
xmin,
xsize);
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
h2,
height1,
rheight,
support_h,
wy,
interp_height,
filter_fn,
ymin,
ysize);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
// interpolate on x-axis for ymin to ymin + ysize
for (int y = 0; y < ysize; y++) {
// copy data into the local buffer and use
// interpolate_aa_single_dim method
for (int x = 0; x < xsize; x++) {
buffer1[x] = idata[n][c][ymin + y][xmin + x];
}
buffer2[y] = static_cast<scalar_t>(
interpolate_aa_single_dim<scalar_t, accscalar_t>(
buffer1, wx, xsize));
}
odata[n][c][h2][w2] = static_cast<scalar_t>(
interpolate_aa_single_dim<scalar_t, accscalar_t>(
buffer2, wy, ysize));
}
}
}
}
template <int interp_size>
static void upsample_gen2d_out_cuda_template(
const Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// Copied and adapted from
// UpSampleBicubic2d.cu::upsample_bicubic2d_out_cuda_template
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_gen2d_out_cuda", {input_arg, output_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
const int num_kernels = output_height * output_width;
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_gen2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
// We are using static buffer memory of 256 * sizeof(float) per thread
// to store weights. Size of weights array is
// interp_size = scale * 2 + 1 for bilinear mode
TORCH_CHECK(
rheight < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
TORCH_CHECK(
rwidth < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
upsample_gen2d_out_frame<scalar_t, accscalar_t, interp_size>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(
num_kernels, rheight, rwidth, align_corners, idata, odata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t, int interp_size>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_gen2d_backward_out_frame(
const int num_elements,
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int input_height = idata.size(2);
const int input_width = idata.size(3);
const int output_height = odata.size(2);
const int output_width = odata.size(3);
if (index >= num_elements) {
return;
}
const int output_x = index % output_width;
const int output_y = index / output_width;
// special case: output just copy
if (input_height == output_height && input_width == output_width) {
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][output_y][output_x];
idata[n][c][output_y][output_x] = val;
}
}
return;
}
const accscalar_t support_h = static_cast<accscalar_t>(
(height_scale >= 1.0) ? (interp_size * 0.5) * height_scale
: interp_size * 0.5);
const accscalar_t support_w = static_cast<accscalar_t>(
(width_scale >= 1.0) ? (interp_size * 0.5) * width_scale
: interp_size * 0.5);
const int interp_height = (int)ceilf(support_h) * 2 + 1;
const int interp_width = (int)ceilf(support_w) * 2 + 1;
// Setup local buffers
// TODO: maybe we can specify dynamic shared memory size before calling the
// cuda code, however we should then ensure that device has enough shared
// memory
scalar_t wx[256];
scalar_t wy[256];
// Compute weights
int xmin, xsize, ymin, ysize;
typedef scalar_t (*filter_fn_t)(scalar_t);
filter_fn_t filter_fn;
if (interp_size == 2) {
filter_fn = bilinear_filter;
} else if (interp_size == 4) {
filter_fn = bicubic_filter;
}
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
output_x,
input_width,
width_scale,
support_w,
wx,
interp_width,
filter_fn,
xmin,
xsize);
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
output_y,
input_height,
height_scale,
support_h,
wy,
interp_height,
filter_fn,
ymin,
ysize);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
scalar_t out_value = odata[n][c][output_y][output_x];
for (int y = 0; y < ysize; y++) {
for (int x = 0; x < xsize; x++) {
upsample_increment_value_bounded<scalar_t, accscalar_t>(
idata,
n,
c,
input_height,
input_width,
ymin + y,
xmin + x,
wx[x] * wy[y] * out_value);
}
}
}
}
}
template <int interp_size>
static void upsample_gen2d_backward_out_cuda_template(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
// Copied and adapted from
// UpSampleBicubic2d.cu::upsample_bicubic2d_backward_out_cuda_template
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_gen2d_backward_out_cuda", {grad_output_arg, grad_input_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
Tensor grad_output = grad_output_.contiguous();
grad_input.zero_();
const int num_kernels = output_height * output_width;
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_gen2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 4>();
auto odata = grad_output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
// We are using static buffer memory of 256 * sizeof(float) per thread
// to store weights. Size of weights array is
// interp_size = scale * 2 + 1 for bilinear mode
TORCH_CHECK(
rheight < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
TORCH_CHECK(
rwidth < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
upsample_gen2d_backward_out_frame<scalar_t, accscalar_t, interp_size>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(
num_kernels, rheight, rwidth, align_corners, idata, odata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} // namespace internal_upsample
} // namespace native
} // namespace at
namespace vision {
namespace ops {
namespace {
// Copied from "UpSample.h" as we can not use UpSample.h with UpSample.cuh
static std::array<int64_t, 4> upsample_2d_common_check(
at::IntArrayRef input_size,
at::IntArrayRef output_size) {
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int64_t output_height = output_size[0];
int64_t output_width = output_size[1];
int64_t nbatch = input_size[0];
int64_t channels = input_size[1];
int64_t input_height = input_size[2];
int64_t input_width = input_size[3];
TORCH_CHECK(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0,
"Input and output sizes should be greater than 0,"
" but got input (H: ",
input_height,
", W: ",
input_width,
") output (H: ",
output_height,
", W: ",
output_width,
")");
return {nbatch, channels, output_height, output_width};
}
template <int interp_size>
at::Tensor interpolate_gen2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
c10::optional<c10::ArrayRef<double>> scale_factors = {};
// Copied from UpSampleBilinear2d.cpp
auto output = at::empty({0}, input.options());
auto osize = at::native::upsample::compute_output_size(
input.sizes(), output_size, scale_factors);
auto scale_h = at::native::upsample_cuda::get_scale_value(scale_factors, 0);
auto scale_w = at::native::upsample_cuda::get_scale_value(scale_factors, 1);
auto full_output_size = upsample_2d_common_check(input.sizes(), osize);
// Allow for empty batch size but not other dimensions
TORCH_CHECK(
input.numel() != 0 ||
c10::multiply_integers(
input.sizes().begin() + 1, input.sizes().end()),
"Non-empty 4D data tensor expected but got a tensor with sizes ",
input.sizes());
output.resize_(full_output_size, input.suggest_memory_format());
at::native::internal_upsample::upsample_gen2d_out_cuda_template<interp_size>(
output,
input,
{full_output_size[2], full_output_size[3]},
align_corners,
scale_h,
scale_w);
return output;
}
template <int interp_size>
at::Tensor interpolate_gen2d_aa_backward_kernel(
const at::Tensor& grad_output,
at::IntArrayRef output_size,
at::IntArrayRef input_size,
bool align_corners) {
c10::optional<c10::ArrayRef<double>> scale_factors = {};
// Copied from UpSampleBicubic2d.cpp::upsample_bicubic2d_backward
auto grad_input = at::empty({0}, grad_output.options());
auto osize = at::native::upsample::compute_output_size(
input_size, output_size, scale_factors);
auto scale_h = at::native::upsample_cuda::get_scale_value(scale_factors, 0);
auto scale_w = at::native::upsample_cuda::get_scale_value(scale_factors, 1);
auto full_output_size = upsample_2d_common_check(input_size, osize);
TORCH_CHECK(
grad_output.dim() == 4,
"Expected grad_output to be a tensor of dimension 4 but got: dimension ",
grad_output.dim());
for (int i = 0; i < 4; ++i) {
TORCH_CHECK(
grad_output.size(i) == full_output_size[i],
"Expected grad_output to have the same shape as output;",
" output.size(",
i,
") = ",
full_output_size[i],
" but got grad_output.size(",
i,
") = ",
grad_output.size(i));
}
grad_input.resize_(input_size, grad_output.suggest_memory_format());
at::native::internal_upsample::upsample_gen2d_backward_out_cuda_template<
interp_size>(
grad_input,
grad_output,
{full_output_size[2], full_output_size[3]},
input_size,
align_corners,
scale_h,
scale_w);
return grad_input;
}
at::Tensor interpolate_bilinear2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
return interpolate_gen2d_aa_forward_kernel<2>(
input, output_size, align_corners);
}
at::Tensor interpolate_bicubic2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
return interpolate_gen2d_aa_forward_kernel<4>(
input, output_size, align_corners);
}
at::Tensor interpolate_bilinear2d_aa_backward_kernel(
const at::Tensor& grad_output,
at::IntArrayRef output_size,
at::IntArrayRef input_size,
bool align_corners) {
return interpolate_gen2d_aa_backward_kernel<2>(
grad_output, output_size, input_size, align_corners);
}
at::Tensor interpolate_bicubic2d_aa_backward_kernel(
const at::Tensor& grad_output,
at::IntArrayRef output_size,
at::IntArrayRef input_size,
bool align_corners) {
return interpolate_gen2d_aa_backward_kernel<4>(
grad_output, output_size, input_size, align_corners);
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bilinear2d_aa"),
TORCH_FN(interpolate_bilinear2d_aa_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bicubic2d_aa"),
TORCH_FN(interpolate_bicubic2d_aa_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bilinear2d_aa_backward"),
TORCH_FN(interpolate_bilinear2d_aa_backward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bicubic2d_aa_backward"),
TORCH_FN(interpolate_bicubic2d_aa_backward_kernel));
}
} // namespace ops
} // namespace vision
|
8e0d793878ed7e3172827e01683430d647d45367.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include <random>
#include <iostream>
#include "hiprand/hiprand_kernel.h"
#include "egblas/dropout.hpp"
#include "egblas/cuda_check.hpp"
// Kernel to setup the random states
__global__ void setup_kernel(hiprandState_t* states, size_t seed) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(seed, id, 0, &states[id]);
}
// Kernels for dropout
template <typename T>
__global__ void dropout_kernel(hiprandState_t* states, size_t n, T p, T alpha, T* y, size_t incy) {
auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
T r;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
r = hiprand_uniform(&local_state);
if(r < p){
y[incy * index] = T(0);
} else {
y[incy * index] = alpha * T(1);
}
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void dropout_kernel1(hiprandState_t* states, size_t n, T p, T* y, size_t incy) {
auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
T r;
//Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
r = hiprand_uniform(&local_state);
if(r < p){
y[incy * index] = T(0);
} else {
y[incy * index] = T(1);
}
}
// Copy state back to global memory
states[base_index] = local_state;
}
// Kernel for inverted dropout
template <typename T>
__global__ void inv_dropout_kernel(hiprandState_t* states, size_t n, T p, T alpha, T* y, size_t incy) {
auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
T r;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
r = hiprand_uniform(&local_state);
if(r < p){
y[incy * index] = T(0);
} else {
y[incy * index] = alpha * (T(1) / (T(1) - p));
}
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void inv_dropout_kernel1(hiprandState_t* states, size_t n, T p, T* y, size_t incy) {
auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
T r;
//Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
r = hiprand_uniform(&local_state);
if(r < p){
y[incy * index] = T(0);
} else {
y[incy * index] = T(1) / (T(1) - p);
}
}
// Copy state back to global memory
states[base_index] = local_state;
}
// Kernel for reset
template <typename T>
__global__ void dropout_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = T(0);
}
}
template <typename T>
void dropout_kernel0_run(size_t n, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dropout_kernel0<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( dropout_kernel0<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
// Preparation
void* egblas_dropout_prepare(){
std::random_device rd;
return egblas_dropout_prepare_seed(rd());
}
void* egblas_dropout_prepare_seed(size_t seed){
// Allocate room for the states
hiprandState_t* states;
cuda_check(hipMalloc((void**)&states, 64 * 64 * sizeof(hiprandState_t)));
// Initialize the seeds
hipLaunchKernelGGL(( setup_kernel), dim3(64), dim3(64), 0, 0, states, seed);
return states;
}
void egblas_dropout_release(void* states){
// Free the states
cuda_check(hipFree(states));
}
// Regular dropout
void egblas_sdropout_seed(size_t n, float p, float alpha, float* x, size_t incx, size_t seed) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
hiprandState_t* states;
cuda_check(hipMalloc((void**)&states, gridSize * blockSize * sizeof(hiprandState_t)));
// Initialize the seeds
hipLaunchKernelGGL(( setup_kernel), dim3(gridSize), dim3(blockSize), 0, 0, states, seed);
// Compute the dropout mask
if (alpha == 1.0f) {
hipLaunchKernelGGL(( dropout_kernel1<float>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, p, x, incx);
} else {
hipLaunchKernelGGL(( dropout_kernel<float>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, p, alpha, x, incx);
}
// Free the states
cuda_check(hipFree(states));
}
void egblas_sdropout(size_t n, float p, float alpha, float* x, size_t incx) {
std::random_device rd;
egblas_sdropout_seed(n, p, alpha, x, incx, rd());
}
void egblas_ddropout_seed(size_t n, double p, double alpha, double* x, size_t incx, size_t seed) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
hiprandState_t* states;
cuda_check(hipMalloc((void**)&states, gridSize * blockSize * sizeof(hiprandState_t)));
// Initialize the seeds
hipLaunchKernelGGL(( setup_kernel), dim3(gridSize), dim3(blockSize), 0, 0, states, seed);
// Compute the dropout mask
if (alpha == 1.0f) {
hipLaunchKernelGGL(( dropout_kernel1<double>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, p, x, incx);
} else {
hipLaunchKernelGGL(( dropout_kernel<double>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, p, alpha, x, incx);
}
// Free the states
cuda_check(hipFree(states));
}
void egblas_ddropout(size_t n, double p, double alpha, double* x, size_t incx) {
std::random_device rd;
egblas_ddropout_seed(n, p, alpha, x, incx, rd());
}
// Inverted dropout
void egblas_sinv_dropout_seed(size_t n, float p, float alpha, float* x, size_t incx, size_t seed) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
hiprandState_t* states;
cuda_check(hipMalloc((void**)&states, gridSize * blockSize * sizeof(hiprandState_t)));
// Initialize the seeds
hipLaunchKernelGGL(( setup_kernel), dim3(gridSize), dim3(blockSize), 0, 0, states, seed);
// Compute the dropout mask
if (alpha == 1.0f) {
hipLaunchKernelGGL(( inv_dropout_kernel1<float>), dim3(gridSize),dim3(blockSize), 0, 0, states, n, p, x, incx);
} else {
hipLaunchKernelGGL(( inv_dropout_kernel<float>), dim3(gridSize),dim3(blockSize), 0, 0, states, n, p, alpha, x, incx);
}
// Free the states
cuda_check(hipFree(states));
}
void egblas_sinv_dropout(size_t n, float p, float alpha, float* x, size_t incx) {
std::random_device rd;
egblas_sinv_dropout_seed(n, p, alpha, x, incx, rd());
}
void egblas_dinv_dropout_seed(size_t n, double p, double alpha, double* x, size_t incx, size_t seed) {
if (alpha == 0.0) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
hiprandState_t* states;
cuda_check(hipMalloc((void**)&states, gridSize * blockSize * sizeof(hiprandState_t)));
// Initialize the seeds
hipLaunchKernelGGL(( setup_kernel), dim3(gridSize), dim3(blockSize), 0, 0, states, seed);
// Compute the dropout mask
if (alpha == 1.0f) {
hipLaunchKernelGGL(( inv_dropout_kernel1<double>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, p, x, incx);
} else {
hipLaunchKernelGGL(( inv_dropout_kernel<double>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, p, alpha, x, incx);
}
// Free the states
cuda_check(hipFree(states));
}
void egblas_dinv_dropout(size_t n, double p, double alpha, double* x, size_t incx) {
std::random_device rd;
egblas_dinv_dropout_seed(n, p, alpha, x, incx, rd());
}
void egblas_sinv_dropout_states(size_t n, float p, float alpha, float* x, size_t incx, void* states) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the dropout mask
if (alpha == 1.0f) {
hipLaunchKernelGGL(( inv_dropout_kernel1<float>), dim3(gridSize),dim3(blockSize), 0, 0, reinterpret_cast<hiprandState_t*>(states), n, p, x, incx);
} else {
hipLaunchKernelGGL(( inv_dropout_kernel<float>), dim3(gridSize),dim3(blockSize), 0, 0, reinterpret_cast<hiprandState_t*>(states), n, p, alpha, x, incx);
}
}
void egblas_dinv_dropout_states(size_t n, double p, double alpha, double* x, size_t incx, void* states) {
if (alpha == 0.0) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the dropout mask
if (alpha == 1.0) {
hipLaunchKernelGGL(( inv_dropout_kernel1<double>), dim3(gridSize),dim3(blockSize), 0, 0, reinterpret_cast<hiprandState_t*>(states), n, p, x, incx);
} else {
hipLaunchKernelGGL(( inv_dropout_kernel<double>), dim3(gridSize),dim3(blockSize), 0, 0, reinterpret_cast<hiprandState_t*>(states), n, p, alpha, x, incx);
}
}
void egblas_sdropout_states(size_t n, float p, float alpha, float* x, size_t incx, void* states) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the dropout mask
if (alpha == 1.0f) {
hipLaunchKernelGGL(( dropout_kernel1<float>), dim3(gridSize),dim3(blockSize), 0, 0, reinterpret_cast<hiprandState_t*>(states), n, p, x, incx);
} else {
hipLaunchKernelGGL(( dropout_kernel<float>), dim3(gridSize),dim3(blockSize), 0, 0, reinterpret_cast<hiprandState_t*>(states), n, p, alpha, x, incx);
}
}
void egblas_ddropout_states(size_t n, double p, double alpha, double* x, size_t incx, void* states) {
if (alpha == 0.0) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the dropout mask
if (alpha == 1.0) {
hipLaunchKernelGGL(( dropout_kernel1<double>), dim3(gridSize),dim3(blockSize), 0, 0, reinterpret_cast<hiprandState_t*>(states), n, p, x, incx);
} else {
hipLaunchKernelGGL(( dropout_kernel<double>), dim3(gridSize),dim3(blockSize), 0, 0, reinterpret_cast<hiprandState_t*>(states), n, p, alpha, x, incx);
}
}
| 8e0d793878ed7e3172827e01683430d647d45367.cu | //=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include <random>
#include <iostream>
#include "curand_kernel.h"
#include "egblas/dropout.hpp"
#include "egblas/cuda_check.hpp"
// Kernel to setup the random states
__global__ void setup_kernel(curandState* states, size_t seed) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(seed, id, 0, &states[id]);
}
// Kernels for dropout
template <typename T>
__global__ void dropout_kernel(curandState* states, size_t n, T p, T alpha, T* y, size_t incy) {
auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
T r;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
r = curand_uniform(&local_state);
if(r < p){
y[incy * index] = T(0);
} else {
y[incy * index] = alpha * T(1);
}
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void dropout_kernel1(curandState* states, size_t n, T p, T* y, size_t incy) {
auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
T r;
//Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
r = curand_uniform(&local_state);
if(r < p){
y[incy * index] = T(0);
} else {
y[incy * index] = T(1);
}
}
// Copy state back to global memory
states[base_index] = local_state;
}
// Kernel for inverted dropout
template <typename T>
__global__ void inv_dropout_kernel(curandState* states, size_t n, T p, T alpha, T* y, size_t incy) {
auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
T r;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
r = curand_uniform(&local_state);
if(r < p){
y[incy * index] = T(0);
} else {
y[incy * index] = alpha * (T(1) / (T(1) - p));
}
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void inv_dropout_kernel1(curandState* states, size_t n, T p, T* y, size_t incy) {
auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
T r;
//Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
r = curand_uniform(&local_state);
if(r < p){
y[incy * index] = T(0);
} else {
y[incy * index] = T(1) / (T(1) - p);
}
}
// Copy state back to global memory
states[base_index] = local_state;
}
// Kernel for reset
template <typename T>
__global__ void dropout_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = T(0);
}
}
template <typename T>
void dropout_kernel0_run(size_t n, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dropout_kernel0<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
dropout_kernel0<T><<<gridSize, blockSize>>>(n, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
// Preparation
void* egblas_dropout_prepare(){
std::random_device rd;
return egblas_dropout_prepare_seed(rd());
}
void* egblas_dropout_prepare_seed(size_t seed){
// Allocate room for the states
curandState* states;
cuda_check(cudaMalloc((void**)&states, 64 * 64 * sizeof(curandState)));
// Initialize the seeds
setup_kernel<<<64, 64>>>(states, seed);
return states;
}
void egblas_dropout_release(void* states){
// Free the states
cuda_check(cudaFree(states));
}
// Regular dropout
void egblas_sdropout_seed(size_t n, float p, float alpha, float* x, size_t incx, size_t seed) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
curandState* states;
cuda_check(cudaMalloc((void**)&states, gridSize * blockSize * sizeof(curandState)));
// Initialize the seeds
setup_kernel<<<gridSize, blockSize>>>(states, seed);
// Compute the dropout mask
if (alpha == 1.0f) {
dropout_kernel1<float><<<gridSize, blockSize>>>(states, n, p, x, incx);
} else {
dropout_kernel<float><<<gridSize, blockSize>>>(states, n, p, alpha, x, incx);
}
// Free the states
cuda_check(cudaFree(states));
}
void egblas_sdropout(size_t n, float p, float alpha, float* x, size_t incx) {
std::random_device rd;
egblas_sdropout_seed(n, p, alpha, x, incx, rd());
}
void egblas_ddropout_seed(size_t n, double p, double alpha, double* x, size_t incx, size_t seed) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
curandState* states;
cuda_check(cudaMalloc((void**)&states, gridSize * blockSize * sizeof(curandState)));
// Initialize the seeds
setup_kernel<<<gridSize, blockSize>>>(states, seed);
// Compute the dropout mask
if (alpha == 1.0f) {
dropout_kernel1<double><<<gridSize, blockSize>>>(states, n, p, x, incx);
} else {
dropout_kernel<double><<<gridSize, blockSize>>>(states, n, p, alpha, x, incx);
}
// Free the states
cuda_check(cudaFree(states));
}
void egblas_ddropout(size_t n, double p, double alpha, double* x, size_t incx) {
std::random_device rd;
egblas_ddropout_seed(n, p, alpha, x, incx, rd());
}
// Inverted dropout
void egblas_sinv_dropout_seed(size_t n, float p, float alpha, float* x, size_t incx, size_t seed) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
curandState* states;
cuda_check(cudaMalloc((void**)&states, gridSize * blockSize * sizeof(curandState)));
// Initialize the seeds
setup_kernel<<<gridSize, blockSize>>>(states, seed);
// Compute the dropout mask
if (alpha == 1.0f) {
inv_dropout_kernel1<float><<<gridSize,blockSize>>>(states, n, p, x, incx);
} else {
inv_dropout_kernel<float><<<gridSize,blockSize>>>(states, n, p, alpha, x, incx);
}
// Free the states
cuda_check(cudaFree(states));
}
void egblas_sinv_dropout(size_t n, float p, float alpha, float* x, size_t incx) {
std::random_device rd;
egblas_sinv_dropout_seed(n, p, alpha, x, incx, rd());
}
void egblas_dinv_dropout_seed(size_t n, double p, double alpha, double* x, size_t incx, size_t seed) {
if (alpha == 0.0) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
curandState* states;
cuda_check(cudaMalloc((void**)&states, gridSize * blockSize * sizeof(curandState)));
// Initialize the seeds
setup_kernel<<<gridSize, blockSize>>>(states, seed);
// Compute the dropout mask
if (alpha == 1.0f) {
inv_dropout_kernel1<double><<<gridSize, blockSize>>>(states, n, p, x, incx);
} else {
inv_dropout_kernel<double><<<gridSize, blockSize>>>(states, n, p, alpha, x, incx);
}
// Free the states
cuda_check(cudaFree(states));
}
void egblas_dinv_dropout(size_t n, double p, double alpha, double* x, size_t incx) {
std::random_device rd;
egblas_dinv_dropout_seed(n, p, alpha, x, incx, rd());
}
void egblas_sinv_dropout_states(size_t n, float p, float alpha, float* x, size_t incx, void* states) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the dropout mask
if (alpha == 1.0f) {
inv_dropout_kernel1<float><<<gridSize,blockSize>>>(reinterpret_cast<curandState*>(states), n, p, x, incx);
} else {
inv_dropout_kernel<float><<<gridSize,blockSize>>>(reinterpret_cast<curandState*>(states), n, p, alpha, x, incx);
}
}
void egblas_dinv_dropout_states(size_t n, double p, double alpha, double* x, size_t incx, void* states) {
if (alpha == 0.0) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the dropout mask
if (alpha == 1.0) {
inv_dropout_kernel1<double><<<gridSize,blockSize>>>(reinterpret_cast<curandState*>(states), n, p, x, incx);
} else {
inv_dropout_kernel<double><<<gridSize,blockSize>>>(reinterpret_cast<curandState*>(states), n, p, alpha, x, incx);
}
}
void egblas_sdropout_states(size_t n, float p, float alpha, float* x, size_t incx, void* states) {
if (alpha == 0.0f) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the dropout mask
if (alpha == 1.0f) {
dropout_kernel1<float><<<gridSize,blockSize>>>(reinterpret_cast<curandState*>(states), n, p, x, incx);
} else {
dropout_kernel<float><<<gridSize,blockSize>>>(reinterpret_cast<curandState*>(states), n, p, alpha, x, incx);
}
}
void egblas_ddropout_states(size_t n, double p, double alpha, double* x, size_t incx, void* states) {
if (alpha == 0.0) {
dropout_kernel0_run(n, x, incx);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the dropout mask
if (alpha == 1.0) {
dropout_kernel1<double><<<gridSize,blockSize>>>(reinterpret_cast<curandState*>(states), n, p, x, incx);
} else {
dropout_kernel<double><<<gridSize,blockSize>>>(reinterpret_cast<curandState*>(states), n, p, alpha, x, incx);
}
}
|
ec126077789afc9b39b941b086e3bd94bc10fb5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2023 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "EAMForceGPU_hip.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
/*! \file EAMForceGPU.cu
\brief Defines GPU kernel code for calculating the EAM forces. Used by EAMForceComputeGPU.
*/
namespace hoomd
{
namespace metal
{
namespace kernel
{
//! Kernel for computing EAM forces on the GPU
__global__ void gpu_kernel_1(Scalar4* d_force,
Scalar* d_virial,
const size_t virial_pitch,
const unsigned int N,
const Scalar4* d_pos,
BoxDim box,
const unsigned int* d_n_neigh,
const unsigned int* d_nlist,
const size_t* d_head_list,
const Scalar4* d_F,
const Scalar4* d_rho,
const Scalar4* d_rphi,
const Scalar4* d_dF,
const Scalar4* d_drho,
const Scalar4* d_drphi,
Scalar* d_dFdP,
const EAMTexInterData* d_eam_data)
{
__shared__ EAMTexInterData eam_data_ti;
// copy over parameters one int per thread
unsigned int tidx = threadIdx.x;
unsigned int block_size = blockDim.x;
unsigned int param_size = sizeof(EAMTexInterData) / sizeof(int);
for (unsigned int cur_offset = 0; cur_offset < param_size; cur_offset += block_size)
{
if (cur_offset + tidx < param_size)
{
((int*)&eam_data_ti)[cur_offset + tidx] = ((int*)d_eam_data)[cur_offset + tidx];
}
}
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list
int n_neigh = d_n_neigh[idx];
const size_t head_idx = d_head_list[idx];
// read in the position of our particle.
Scalar4 postype = __ldg(d_pos + idx);
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// index and remainder
Scalar position; // look up position, scalar
unsigned int int_position; // look up index for position, integer
unsigned int idxs; // look up index in F, rho, rphi array, considering shift, integer
Scalar remainder; // look up remainder in array, integer
Scalar4 v, dv; // value, d(value)
// initialize the force to 0
Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
// prefetch neighbor index
int cur_neigh = 0;
int next_neigh(0);
next_neigh = __ldg(d_nlist + head_idx);
int typei = __scalar_as_int(postype.w);
// loop over neighbors
Scalar atomElectronDensity = Scalar(0.0);
int ntypes = eam_data_ti.ntypes;
int nrho = eam_data_ti.nrho;
int nr = eam_data_ti.nr;
Scalar rdrho = eam_data_ti.rdrho;
Scalar rdr = eam_data_ti.rdr;
Scalar r_cutsq = eam_data_ti.r_cutsq;
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
// read the current neighbor index
// prefetch the next value and set the current one
cur_neigh = next_neigh;
next_neigh = __ldg(d_nlist + head_idx + neigh_idx + 1);
// get the neighbor's position
Scalar4 neigh_postype = __ldg(d_pos + cur_neigh);
Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
// calculate dr (with periodic boundary conditions)
Scalar3 dx = pos - neigh_pos;
int typej = __scalar_as_int(neigh_postype.w);
// apply periodic boundary conditions
dx = box.minImage(dx);
// calculate r squared
Scalar rsq = dot(dx, dx);
;
if (rsq < r_cutsq)
{
// calculate position r for rho(r)
position = sqrtf(rsq) * rdr;
int_position = (unsigned int)position;
int_position = min(int_position, nr - 1);
remainder = position - int_position;
// calculate P = sum{rho}
idxs = int_position + nr * (typej * ntypes + typei);
v = __ldg(d_rho + idxs);
atomElectronDensity += v.w + v.z * remainder + v.y * remainder * remainder
+ v.x * remainder * remainder * remainder;
}
}
// calculate position rho for F(rho)
position = atomElectronDensity * rdrho;
int_position = (unsigned int)position;
int_position = min(int_position, nrho - 1);
remainder = position - int_position;
idxs = int_position + typei * nrho;
dv = __ldg(d_dF + idxs);
v = __ldg(d_F + idxs);
// compute dF / dP
d_dFdP[idx] = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// compute embedded energy F(P), sum up each particle
force.w += v.w + v.z * remainder + v.y * remainder * remainder
+ v.x * remainder * remainder * remainder;
// update the d_force
d_force[idx] = force;
}
//! Second stage kernel for computing EAM forces on the GPU
__global__ void gpu_kernel_2(Scalar4* d_force,
Scalar* d_virial,
const size_t virial_pitch,
const unsigned int N,
const Scalar4* d_pos,
BoxDim box,
const unsigned int* d_n_neigh,
const unsigned int* d_nlist,
const size_t* d_head_list,
const Scalar4* d_F,
const Scalar4* d_rho,
const Scalar4* d_rphi,
const Scalar4* d_dF,
const Scalar4* d_drho,
const Scalar4* d_drphi,
Scalar* d_dFdP,
const EAMTexInterData* d_eam_data)
{
__shared__ EAMTexInterData eam_data_ti;
// copy over parameters one int per thread
unsigned int tidx = threadIdx.x;
unsigned int block_size = blockDim.x;
unsigned int param_size = sizeof(EAMTexInterData) / sizeof(int);
for (unsigned int cur_offset = 0; cur_offset < param_size; cur_offset += block_size)
{
if (cur_offset + tidx < param_size)
{
((int*)&eam_data_ti)[cur_offset + tidx] = ((int*)d_eam_data)[cur_offset + tidx];
}
}
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list
int n_neigh = d_n_neigh[idx];
const size_t head_idx = d_head_list[idx];
// read in the position of our particle. Texture reads of Scalar4's are faster than global reads
Scalar4 postype = __ldg(d_pos + idx);
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
int typei = __scalar_as_int(postype.w);
// index and remainder
Scalar position; // look up position, scalar
unsigned int int_position; // look up index for position, integer
unsigned int idxs; // look up index in F, rho, rphi array, considering shift, integer
Scalar remainder; // look up remainder in array, integer
Scalar4 v, dv; // value, d(value)
// prefetch neighbor index
int cur_neigh = 0;
int next_neigh(0);
next_neigh = __ldg(d_nlist + head_idx);
// Scalar4 force = force_data.force[idx];
Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
// force.w = force_data.force[idx].w;
Scalar fxi = Scalar(0.0);
Scalar fyi = Scalar(0.0);
Scalar fzi = Scalar(0.0);
Scalar m_pe = Scalar(0.0);
Scalar pairForce = Scalar(0.0);
Scalar virial[6];
for (int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
force.w = d_force[idx].w;
int ntypes = eam_data_ti.ntypes;
int nr = eam_data_ti.nr;
Scalar rdr = eam_data_ti.rdr;
Scalar r_cutsq = eam_data_ti.r_cutsq;
Scalar d_dFdPidx = __ldg(d_dFdP + idx);
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
cur_neigh = next_neigh;
next_neigh = __ldg(d_nlist + head_idx + neigh_idx + 1);
// get the neighbor's position
Scalar4 neigh_postype = __ldg(d_pos + cur_neigh);
Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
// calculate dr (with periodic boundary conditions)
Scalar3 dx = pos - neigh_pos;
int typej = __scalar_as_int(neigh_postype.w);
// apply periodic boundary conditions
dx = box.minImage(dx);
// calculate r squared
Scalar rsq = dot(dx, dx);
if (rsq > r_cutsq)
continue;
// calculate position r for phi(r)
Scalar inverseR = rsqrtf(rsq);
Scalar r = Scalar(1.0) / inverseR;
position = r * rdr;
int_position = (unsigned int)position;
int_position = min(int_position, nr - 1);
remainder = position - int_position;
// calculate the shift position for type ij
int shift = (typei >= typej) ? (int)(0.5 * (2 * ntypes - typej - 1) * typej + typei) * nr
: (int)(0.5 * (2 * ntypes - typei - 1) * typei + typej) * nr;
idxs = int_position + shift;
v = __ldg(d_rphi + idxs);
dv = __ldg(d_drphi + idxs);
// aspair_potential = r * phi
Scalar aspair_potential = v.w + v.z * remainder + v.y * remainder * remainder
+ v.x * remainder * remainder * remainder;
// derivative_pair_potential = phi + r * dphi / dr
Scalar derivative_pair_potential = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// pair_eng = phi
Scalar pair_eng = aspair_potential * inverseR;
// derivativePhi = (phi + r * dphi/dr - phi) * 1/r = dphi / dr
Scalar derivativePhi = (derivative_pair_potential - pair_eng) * inverseR;
// derivativeRhoI = drho / dr of i
idxs = int_position + typei * ntypes * nr + typej * nr;
dv = __ldg(d_drho + idxs);
Scalar derivativeRhoI = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// derivativeRhoJ = drho / dr of j
idxs = int_position + typej * ntypes * nr + typei * nr;
dv = __ldg(d_drho + idxs);
Scalar derivativeRhoJ = dv.z + dv.y * remainder + dv.x * remainder * remainder;
// fullDerivativePhi = dF/dP * drho / dr for j + dF/dP * drho / dr for j + phi
Scalar d_dFdPcur = __ldg(d_dFdP + cur_neigh);
Scalar fullDerivativePhi
= d_dFdPidx * derivativeRhoJ + d_dFdPcur * derivativeRhoI + derivativePhi;
// compute forces
pairForce = -fullDerivativePhi * inverseR;
// avoid double counting
Scalar pairForceover2 = Scalar(0.5) * pairForce;
virial[0] += dx.x * dx.x * pairForceover2;
virial[1] += dx.x * dx.y * pairForceover2;
virial[2] += dx.x * dx.z * pairForceover2;
virial[3] += dx.y * dx.y * pairForceover2;
virial[4] += dx.y * dx.z * pairForceover2;
virial[5] += dx.z * dx.z * pairForceover2;
fxi += dx.x * pairForce;
fyi += dx.y * pairForce;
fzi += dx.z * pairForce;
m_pe += pair_eng * Scalar(0.5);
}
// now that the force calculation is complete, write out the result
force.x = fxi;
force.y = fyi;
force.z = fzi;
force.w += m_pe;
d_force[idx] = force;
for (int i = 0; i < 6; i++)
d_virial[i * virial_pitch + idx] = virial[i];
}
//! Compute EAM forces on the GPU.
/*! Launches the two-stage EAM evaluation: gpu_kernel_1 accumulates the
    per-particle electron density / embedding energy and caches dF/dP,
    then gpu_kernel_2 consumes that cache to compute pair forces and the
    virial. One thread handles one particle in each stage.

    The requested \a block_size is clamped to each kernel's hardware limit
    as reported by hipFuncGetAttributes().
*/
hipError_t gpu_compute_eam_tex_inter_forces(Scalar4* d_force,
                                            Scalar* d_virial,
                                            const size_t virial_pitch,
                                            const unsigned int N,
                                            const Scalar4* d_pos,
                                            const BoxDim& box,
                                            const unsigned int* d_n_neigh,
                                            const unsigned int* d_nlist,
                                            const size_t* d_head_list,
                                            const size_t size_nlist,
                                            const EAMTexInterData* d_eam_data,
                                            Scalar* d_dFdP,
                                            const Scalar4* d_F,
                                            const Scalar4* d_rho,
                                            const Scalar4* d_rphi,
                                            const Scalar4* d_dF,
                                            const Scalar4* d_drho,
                                            const Scalar4* d_drphi,
                                            const unsigned int block_size)
    {
    // query per-kernel thread limits so the caller's block size can be clamped
    hipFuncAttributes attr_kernel_1;
    hipFuncGetAttributes(&attr_kernel_1, reinterpret_cast<const void*>(gpu_kernel_1));
    hipFuncAttributes attr_kernel_2;
    hipFuncGetAttributes(&attr_kernel_2, reinterpret_cast<const void*>(gpu_kernel_2));

    const unsigned int run_block_size_1 = min(block_size, (unsigned int)attr_kernel_1.maxThreadsPerBlock);
    const unsigned int run_block_size_2 = min(block_size, (unsigned int)attr_kernel_2.maxThreadsPerBlock);

    // one thread per particle, rounded up to whole blocks
    dim3 threads_1(run_block_size_1, 1, 1);
    dim3 grid_1((int)ceil((double)N / (double)run_block_size_1), 1, 1);
    dim3 threads_2(run_block_size_2, 1, 1);
    dim3 grid_2((int)ceil((double)N / (double)run_block_size_2), 1, 1);

    // stage 1: electron density sums, embedding energy, cached dF/dP
    hipLaunchKernelGGL(gpu_kernel_1, dim3(grid_1), dim3(threads_1), 0, 0,
                       d_force, d_virial, virial_pitch, N, d_pos, box,
                       d_n_neigh, d_nlist, d_head_list,
                       d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi,
                       d_dFdP, d_eam_data);

    // stage 2: pair forces and virial from the cached dF/dP values
    hipLaunchKernelGGL(gpu_kernel_2, dim3(grid_2), dim3(threads_2), 0, 0,
                       d_force, d_virial, virial_pitch, N, d_pos, box,
                       d_n_neigh, d_nlist, d_head_list,
                       d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi,
                       d_dFdP, d_eam_data);

    return hipSuccess;
    }
} // end namespace kernel
} // end namespace metal
} // end namespace hoomd
| ec126077789afc9b39b941b086e3bd94bc10fb5d.cu | // Copyright (c) 2009-2023 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "EAMForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
/*! \file EAMForceGPU.cu
\brief Defines GPU kernel code for calculating the EAM forces. Used by EAMForceComputeGPU.
*/
namespace hoomd
{
namespace metal
{
namespace kernel
{
//! First-stage EAM kernel: per-particle electron density and embedding energy.
/*! One thread per particle. Sums the electron density contributed by all
    neighbors, interpolates the embedding function F(rho) cubically, caches
    dF/dP for the second-stage kernel in d_dFdP, and accumulates the embedding
    energy into force.w.
*/
__global__ void gpu_kernel_1(Scalar4* d_force,
                             Scalar* d_virial,
                             const size_t virial_pitch,
                             const unsigned int N,
                             const Scalar4* d_pos,
                             BoxDim box,
                             const unsigned int* d_n_neigh,
                             const unsigned int* d_nlist,
                             const size_t* d_head_list,
                             const Scalar4* d_F,
                             const Scalar4* d_rho,
                             const Scalar4* d_rphi,
                             const Scalar4* d_dF,
                             const Scalar4* d_drho,
                             const Scalar4* d_drphi,
                             Scalar* d_dFdP,
                             const EAMTexInterData* d_eam_data)
    {
    __shared__ EAMTexInterData eam_data_ti;
    // copy over parameters one int per thread
    // (assumes sizeof(EAMTexInterData) is a multiple of sizeof(int) — TODO confirm)
    unsigned int tidx = threadIdx.x;
    unsigned int block_size = blockDim.x;
    unsigned int param_size = sizeof(EAMTexInterData) / sizeof(int);
    for (unsigned int cur_offset = 0; cur_offset < param_size; cur_offset += block_size)
        {
        if (cur_offset + tidx < param_size)
            {
            ((int*)&eam_data_ti)[cur_offset + tidx] = ((int*)d_eam_data)[cur_offset + tidx];
            }
        }
    // FIX: the block cooperatively filled eam_data_ti, so a barrier is required
    // before any thread reads it; without this a thread may read words another
    // thread has not yet written (shared-memory data race).
    __syncthreads();
    // start by identifying which particle we are to handle
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    // load in the length of the list
    int n_neigh = d_n_neigh[idx];
    const size_t head_idx = d_head_list[idx];
    // read in the position of our particle
    Scalar4 postype = __ldg(d_pos + idx);
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
    // interpolation state
    Scalar position;           // table lookup position, scalar
    unsigned int int_position; // table lookup index, integer part
    unsigned int idxs;         // index into F, rho, rphi arrays including type shift
    Scalar remainder;          // fractional part of the lookup position
    Scalar4 v, dv;             // cubic coefficients of value and derivative
    // initialize the force to 0
    Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
    // prefetch neighbor index
    int cur_neigh = 0;
    int next_neigh(0);
    next_neigh = __ldg(d_nlist + head_idx);
    int typei = __scalar_as_int(postype.w);
    // loop over neighbors, summing electron density contributions
    Scalar atomElectronDensity = Scalar(0.0);
    int ntypes = eam_data_ti.ntypes;
    int nrho = eam_data_ti.nrho;
    int nr = eam_data_ti.nr;
    Scalar rdrho = eam_data_ti.rdrho;
    Scalar rdr = eam_data_ti.rdr;
    Scalar r_cutsq = eam_data_ti.r_cutsq;
    for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
        {
        // prefetch the next neighbor index while processing the current one
        cur_neigh = next_neigh;
        next_neigh = __ldg(d_nlist + head_idx + neigh_idx + 1);
        // get the neighbor's position
        Scalar4 neigh_postype = __ldg(d_pos + cur_neigh);
        Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
        // calculate dr (with periodic boundary conditions)
        Scalar3 dx = pos - neigh_pos;
        int typej = __scalar_as_int(neigh_postype.w);
        // apply periodic boundary conditions
        dx = box.minImage(dx);
        // calculate r squared
        Scalar rsq = dot(dx, dx);
        if (rsq < r_cutsq)
            {
            // calculate position r for rho(r)
            position = sqrtf(rsq) * rdr;
            int_position = (unsigned int)position;
            int_position = min(int_position, nr - 1);
            remainder = position - int_position;
            // calculate P = sum{rho}; cubic interpolation of rho at r
            idxs = int_position + nr * (typej * ntypes + typei);
            v = __ldg(d_rho + idxs);
            atomElectronDensity += v.w + v.z * remainder + v.y * remainder * remainder
                                   + v.x * remainder * remainder * remainder;
            }
        }
    // calculate position rho for F(rho)
    position = atomElectronDensity * rdrho;
    int_position = (unsigned int)position;
    int_position = min(int_position, nrho - 1);
    remainder = position - int_position;
    idxs = int_position + typei * nrho;
    dv = __ldg(d_dF + idxs);
    v = __ldg(d_F + idxs);
    // compute dF / dP and cache it for gpu_kernel_2
    d_dFdP[idx] = dv.z + dv.y * remainder + dv.x * remainder * remainder;
    // compute embedded energy F(P), accumulated per particle
    force.w += v.w + v.z * remainder + v.y * remainder * remainder
               + v.x * remainder * remainder * remainder;
    // update the d_force
    d_force[idx] = force;
    }
//! Second-stage EAM kernel: pair forces, pair energy, and virial.
/*! One thread per particle. Uses the dF/dP values cached by gpu_kernel_1 to
    evaluate the full pair force -(dF_i/dP * drho_j/dr + dF_j/dP * drho_i/dr
    + dphi/dr)/r for each neighbor, then writes the total force, energy, and
    virial for the particle.
*/
__global__ void gpu_kernel_2(Scalar4* d_force,
                             Scalar* d_virial,
                             const size_t virial_pitch,
                             const unsigned int N,
                             const Scalar4* d_pos,
                             BoxDim box,
                             const unsigned int* d_n_neigh,
                             const unsigned int* d_nlist,
                             const size_t* d_head_list,
                             const Scalar4* d_F,
                             const Scalar4* d_rho,
                             const Scalar4* d_rphi,
                             const Scalar4* d_dF,
                             const Scalar4* d_drho,
                             const Scalar4* d_drphi,
                             Scalar* d_dFdP,
                             const EAMTexInterData* d_eam_data)
    {
    __shared__ EAMTexInterData eam_data_ti;
    // copy over parameters one int per thread
    // (assumes sizeof(EAMTexInterData) is a multiple of sizeof(int) — TODO confirm)
    unsigned int tidx = threadIdx.x;
    unsigned int block_size = blockDim.x;
    unsigned int param_size = sizeof(EAMTexInterData) / sizeof(int);
    for (unsigned int cur_offset = 0; cur_offset < param_size; cur_offset += block_size)
        {
        if (cur_offset + tidx < param_size)
            {
            ((int*)&eam_data_ti)[cur_offset + tidx] = ((int*)d_eam_data)[cur_offset + tidx];
            }
        }
    // FIX: barrier required between the cooperative fill of eam_data_ti above
    // and the reads below; otherwise threads may read partially-written shared
    // memory (data race).
    __syncthreads();
    // start by identifying which particle we are to handle
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    // load in the length of the list
    int n_neigh = d_n_neigh[idx];
    const size_t head_idx = d_head_list[idx];
    // read in the position of our particle
    Scalar4 postype = __ldg(d_pos + idx);
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
    int typei = __scalar_as_int(postype.w);
    // interpolation state
    Scalar position;           // table lookup position, scalar
    unsigned int int_position; // table lookup index, integer part
    unsigned int idxs;         // index into F, rho, rphi arrays including type shift
    Scalar remainder;          // fractional part of the lookup position
    Scalar4 v, dv;             // cubic coefficients of value and derivative
    // prefetch neighbor index
    int cur_neigh = 0;
    int next_neigh(0);
    next_neigh = __ldg(d_nlist + head_idx);
    Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
    Scalar fxi = Scalar(0.0);
    Scalar fyi = Scalar(0.0);
    Scalar fzi = Scalar(0.0);
    Scalar m_pe = Scalar(0.0);
    Scalar pairForce = Scalar(0.0);
    Scalar virial[6];
    for (int i = 0; i < 6; i++)
        virial[i] = Scalar(0.0);
    // keep the embedding energy accumulated by gpu_kernel_1
    force.w = d_force[idx].w;
    int ntypes = eam_data_ti.ntypes;
    int nr = eam_data_ti.nr;
    Scalar rdr = eam_data_ti.rdr;
    Scalar r_cutsq = eam_data_ti.r_cutsq;
    Scalar d_dFdPidx = __ldg(d_dFdP + idx);
    for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
        {
        cur_neigh = next_neigh;
        next_neigh = __ldg(d_nlist + head_idx + neigh_idx + 1);
        // get the neighbor's position
        Scalar4 neigh_postype = __ldg(d_pos + cur_neigh);
        Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
        // calculate dr (with periodic boundary conditions)
        Scalar3 dx = pos - neigh_pos;
        int typej = __scalar_as_int(neigh_postype.w);
        // apply periodic boundary conditions
        dx = box.minImage(dx);
        // calculate r squared
        Scalar rsq = dot(dx, dx);
        if (rsq > r_cutsq)
            continue;
        // calculate position r for phi(r)
        Scalar inverseR = rsqrtf(rsq);
        Scalar r = Scalar(1.0) / inverseR;
        position = r * rdr;
        int_position = (unsigned int)position;
        int_position = min(int_position, nr - 1);
        remainder = position - int_position;
        // calculate the shift position for type pair ij (symmetric storage)
        int shift = (typei >= typej) ? (int)(0.5 * (2 * ntypes - typej - 1) * typej + typei) * nr
                                     : (int)(0.5 * (2 * ntypes - typei - 1) * typei + typej) * nr;
        idxs = int_position + shift;
        v = __ldg(d_rphi + idxs);
        dv = __ldg(d_drphi + idxs);
        // aspair_potential = r * phi
        Scalar aspair_potential = v.w + v.z * remainder + v.y * remainder * remainder
                                  + v.x * remainder * remainder * remainder;
        // derivative_pair_potential = phi + r * dphi / dr
        Scalar derivative_pair_potential = dv.z + dv.y * remainder + dv.x * remainder * remainder;
        // pair_eng = phi
        Scalar pair_eng = aspair_potential * inverseR;
        // derivativePhi = (phi + r * dphi/dr - phi) * 1/r = dphi / dr
        Scalar derivativePhi = (derivative_pair_potential - pair_eng) * inverseR;
        // derivativeRhoI = drho / dr of i
        idxs = int_position + typei * ntypes * nr + typej * nr;
        dv = __ldg(d_drho + idxs);
        Scalar derivativeRhoI = dv.z + dv.y * remainder + dv.x * remainder * remainder;
        // derivativeRhoJ = drho / dr of j
        idxs = int_position + typej * ntypes * nr + typei * nr;
        dv = __ldg(d_drho + idxs);
        Scalar derivativeRhoJ = dv.z + dv.y * remainder + dv.x * remainder * remainder;
        // fullDerivativePhi = dF/dP * drho / dr for i + dF/dP * drho / dr for j + dphi/dr
        Scalar d_dFdPcur = __ldg(d_dFdP + cur_neigh);
        Scalar fullDerivativePhi
            = d_dFdPidx * derivativeRhoJ + d_dFdPcur * derivativeRhoI + derivativePhi;
        // compute forces
        pairForce = -fullDerivativePhi * inverseR;
        // avoid double counting: each pair is visited from both particles
        Scalar pairForceover2 = Scalar(0.5) * pairForce;
        virial[0] += dx.x * dx.x * pairForceover2;
        virial[1] += dx.x * dx.y * pairForceover2;
        virial[2] += dx.x * dx.z * pairForceover2;
        virial[3] += dx.y * dx.y * pairForceover2;
        virial[4] += dx.y * dx.z * pairForceover2;
        virial[5] += dx.z * dx.z * pairForceover2;
        fxi += dx.x * pairForce;
        fyi += dx.y * pairForce;
        fzi += dx.z * pairForce;
        m_pe += pair_eng * Scalar(0.5);
        }
    // now that the force calculation is complete, write out the result
    force.x = fxi;
    force.y = fyi;
    force.z = fzi;
    force.w += m_pe;
    d_force[idx] = force;
    for (int i = 0; i < 6; i++)
        d_virial[i * virial_pitch + idx] = virial[i];
    }
//! Compute EAM forces on the GPU.
/*! Host-side driver: clamps the requested block size to each kernel's
    reported limit, sizes a 1-thread-per-particle grid, and runs the two
    EAM stages back to back (density/embedding first, then pair forces).
*/
hipError_t gpu_compute_eam_tex_inter_forces(Scalar4* d_force,
                                            Scalar* d_virial,
                                            const size_t virial_pitch,
                                            const unsigned int N,
                                            const Scalar4* d_pos,
                                            const BoxDim& box,
                                            const unsigned int* d_n_neigh,
                                            const unsigned int* d_nlist,
                                            const size_t* d_head_list,
                                            const size_t size_nlist,
                                            const EAMTexInterData* d_eam_data,
                                            Scalar* d_dFdP,
                                            const Scalar4* d_F,
                                            const Scalar4* d_rho,
                                            const Scalar4* d_rphi,
                                            const Scalar4* d_dF,
                                            const Scalar4* d_drho,
                                            const Scalar4* d_drphi,
                                            const unsigned int block_size)
    {
    // ask the runtime how many threads per block each kernel supports
    hipFuncAttributes attributes_1;
    hipFuncAttributes attributes_2;
    hipFuncGetAttributes(&attributes_1, reinterpret_cast<const void*>(gpu_kernel_1));
    hipFuncGetAttributes(&attributes_2, reinterpret_cast<const void*>(gpu_kernel_2));
    unsigned int max_block_size_1 = attributes_1.maxThreadsPerBlock;
    unsigned int max_block_size_2 = attributes_2.maxThreadsPerBlock;

    // never launch more threads per block than the kernel allows
    const unsigned int run_block_size_1 = min(block_size, max_block_size_1);
    const unsigned int run_block_size_2 = min(block_size, max_block_size_2);

    // grid covers all N particles, one thread each
    const dim3 threads_1(run_block_size_1, 1, 1);
    const dim3 grid_1((int)ceil((double)N / (double)run_block_size_1), 1, 1);
    const dim3 threads_2(run_block_size_2, 1, 1);
    const dim3 grid_2((int)ceil((double)N / (double)run_block_size_2), 1, 1);

    // stage 1 caches dF/dP, stage 2 consumes it
    hipLaunchKernelGGL(gpu_kernel_1, dim3(grid_1), dim3(threads_1), 0, 0,
                       d_force, d_virial, virial_pitch, N, d_pos, box,
                       d_n_neigh, d_nlist, d_head_list,
                       d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi,
                       d_dFdP, d_eam_data);
    hipLaunchKernelGGL(gpu_kernel_2, dim3(grid_2), dim3(threads_2), 0, 0,
                       d_force, d_virial, virial_pitch, N, d_pos, box,
                       d_n_neigh, d_nlist, d_head_list,
                       d_F, d_rho, d_rphi, d_dF, d_drho, d_drphi,
                       d_dFdP, d_eam_data);
    return hipSuccess;
    }
} // end namespace kernel
} // end namespace metal
} // end namespace hoomd
|
9bb81f255e49404a435ece54bfca7d918a6ac3a5.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
// Synchronize the device and abort the program if any prior GPU operation
// failed, printing `msg` (and file:line when `file` is non-null) to stderr.
// Compiled out entirely when ERRORCHECK is 0.
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
    hipDeviceSynchronize();
    hipError_t status = hipGetLastError();
    if (status != hipSuccess) {
        fprintf(stderr, "CUDA error");
        if (file) {
            fprintf(stderr, " (%s:%d)", file, line);
        }
        fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(status));
#  ifdef _WIN32
        // keep the console window open so the message can be read
        getchar();
#  endif
        exit(EXIT_FAILURE);
    }
#endif
}
// Build a thrust RNG whose seed mixes iteration number, flat thread/pixel
// index, and bounce depth, so every (pixel, iteration, depth) triple draws
// from an independent stream.
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
    const int seed = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
    return thrust::default_random_engine(seed);
}
// Kernel that writes the accumulated image into the OpenGL PBO directly.
// Each thread converts one pixel: divide the running radiance sum by the
// iteration count, scale to [0, 255], and clamp.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
        int iter, glm::vec3* image) {
    int px = (blockIdx.x * blockDim.x) + threadIdx.x;
    int py = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (px >= resolution.x || py >= resolution.y) {
        return;
    }
    int pixel = px + (py * resolution.x);
    glm::vec3 accum = image[pixel];
    // alpha is unused by the display path
    pbo[pixel].w = 0;
    pbo[pixel].x = glm::clamp((int)(accum.x / iter * 255.0), 0, 255);
    pbo[pixel].y = glm::clamp((int)(accum.y / iter * 255.0), 0, 255);
    pbo[pixel].z = glm::clamp((int)(accum.z / iter * 255.0), 0, 255);
}
// Host-side handle to the scene being rendered (set by pathtraceInit).
static Scene * hst_scene = NULL;
// Device buffers, allocated in pathtraceInit and released in pathtraceFree.
static glm::vec3 * dev_image = NULL;               // accumulated radiance per pixel
static Geom * dev_geoms = NULL;                    // scene geometry
static Material * dev_materials = NULL;            // scene materials
static PathSegment * dev_paths = NULL;             // one path segment per pixel
static ShadeableIntersection * dev_intersections = NULL; // per-path hit records
// Two copies of the per-path material ids: thrust::sort_by_key consumes its
// key array, and both dev_paths and dev_intersections must be sorted by the
// same keys.
static int * dev_materialIDs_1 = NULL;
static int * dev_materialIDs_2 = NULL;
// first bounce intersections cache (reused across iterations when enabled)
static ShadeableIntersection * dev_first_bounce_intersections = NULL;
// texture buffer: array of device pointers, one per texture, plus sizes
glm::vec3 ** dev_textures = NULL;
glm::vec2 * dev_textureSizes = NULL;
// normal maps, stored the same way as textures
glm::vec3 ** dev_normal_maps = NULL;
glm::vec2 * dev_normal_mapSizes = NULL;
// Allocate and populate all device-side buffers for rendering `scene`.
// Must be called before pathtrace(); pathtraceFree() releases the memory.
void pathtraceInit(Scene *scene) {
    hst_scene = scene;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;
    // accumulation image starts at black
    hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
    hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
    hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
    // geometry and materials are copied once; they do not change per frame
    hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
    hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
    hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
    hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
    hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
    hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
    // key buffers for material-id sorting and the first-bounce cache
    hipMalloc(&dev_materialIDs_1, pixelcount * sizeof(int));
    hipMalloc(&dev_materialIDs_2, pixelcount * sizeof(int));
    hipMalloc(&dev_first_bounce_intersections, pixelcount * sizeof(ShadeableIntersection));
    // copy texture memory to device: each texture gets its own device buffer,
    // and dev_textures holds the array of those device pointers
    int textureSize = hst_scene->textures.size();
    hipMalloc((void**)&dev_textures, textureSize * sizeof(glm::vec3*));
    hipMalloc((void**)&dev_textureSizes, textureSize * sizeof(glm::vec2));
    std::vector<glm::vec3*> textures;
    std::vector<glm::vec2> textureSizes;
    glm::vec3* tmp;
    for (int i = 0; i < textureSize; ++i)
    {
        int texPixelCount = hst_scene->textures[i]->pixelCount();
        hipMalloc((void**)&tmp, texPixelCount * sizeof(glm::vec3));
        hipMemcpy(tmp, hst_scene->textures[i]->pixels, texPixelCount * sizeof(glm::vec3), hipMemcpyHostToDevice);
        textures.push_back(tmp);
        textureSizes.push_back(hst_scene->textures[i]->getSize());
    }
    hipMemcpy(dev_textures, textures.data(), textureSize * sizeof(glm::vec3*), hipMemcpyHostToDevice);
    hipMemcpy(dev_textureSizes, textureSizes.data(), textureSize * sizeof(glm::vec2), hipMemcpyHostToDevice);
    // copy normal maps into device memory, same layout as the textures
    // NOTE(review): the per-texture device pointers collected in `textures`
    // and `normal_maps` are not retained anywhere on the host, so
    // pathtraceFree() cannot release those buffers — see note there.
    int normalMapSize = hst_scene->normalMaps.size();
    hipMalloc((void**)&dev_normal_maps, normalMapSize * sizeof(glm::vec3*));
    hipMalloc((void**)&dev_normal_mapSizes, normalMapSize * sizeof(glm::vec2));
    std::vector<glm::vec3*> normal_maps;
    std::vector<glm::vec2> normal_map_sizes;
    for (int i = 0; i < normalMapSize; ++i)
    {
        int texPixelCount = hst_scene->normalMaps[i]->pixelCount();
        hipMalloc((void**)&tmp, texPixelCount * sizeof(glm::vec3));
        hipMemcpy(tmp, hst_scene->normalMaps[i]->pixels, texPixelCount * sizeof(glm::vec3), hipMemcpyHostToDevice);
        normal_maps.push_back(tmp);
        normal_map_sizes.push_back(hst_scene->normalMaps[i]->getSize());
    }
    hipMemcpy(dev_normal_maps, normal_maps.data(), normalMapSize * sizeof(glm::vec3*), hipMemcpyHostToDevice);
    hipMemcpy(dev_normal_mapSizes, normal_map_sizes.data(), normalMapSize * sizeof(glm::vec2), hipMemcpyHostToDevice);
    checkCUDAError("pathtraceInit");
}
// Release all device buffers allocated by pathtraceInit().
// NOTE(review): the individual per-texture and per-normal-map pixel buffers
// allocated inside pathtraceInit's loops are never freed here — only the
// pointer arrays are. Those device allocations leak across scene reloads;
// fixing this requires keeping host-side copies of the pointers.
void pathtraceFree() {
    hipFree(dev_image); // no-op if dev_image is null
    hipFree(dev_paths);
    hipFree(dev_geoms);
    hipFree(dev_materials);
    hipFree(dev_intersections);
    // sorting keys and first-bounce cache
    hipFree(dev_materialIDs_1);
    hipFree(dev_materialIDs_2);
    hipFree(dev_first_bounce_intersections);
    // free texture memory on device (pointer arrays only — see note above)
    hipFree(dev_textures);
    hipFree(dev_textureSizes);
    // free normal map memory (pointer arrays only — see note above)
    hipFree(dev_normal_maps);
    hipFree(dev_normal_mapSizes);
    // check error
    checkCUDAError("pathtraceFree");
}
/**
 * Map a uniform sample in [0,1)^2 onto the unit disc using Shirley's
 * concentric mapping (area-preserving, low distortion). Used for
 * depth-of-field lens sampling.
 * reference : http://psgraphics.blogspot.com/2011/01/improved-code-for-concentric-map.html
 */
__device__ glm::vec2 ConcentricSampleDisc(float u1, float u2)
{
    // remap [0,1)^2 to the square [-1,1]^2
    float a = 2.0f * u1 - 1.0f;
    float b = 2.0f * u2 - 1.0f;
    // FIX: guard the degenerate centre sample. With a == b == 0 the original
    // code took the else-branch and computed a/b = 0/0, producing NaN.
    if (a == 0.0f && b == 0.0f)
    {
        return glm::vec2(0.0f, 0.0f);
    }
    float phi, r;
    if (a * a > b * b)
    {
        r = a;
        phi = (PI / 4.0f) * (b / a);
    }
    else
    {
        r = b;
        phi = (PI / 2.0f) - (PI / 4.0f) * (a / b);
    }
    return glm::vec2(r * glm::cos(phi), r * glm::sin(phi));
}
/**
 * Generate PathSegments with rays from the camera through the screen into the
 * scene, which is the first bounce of rays. One thread per pixel.
 *
 * Antialiasing - jitters the ray within the pixel when stochasticAA is set.
 * Lens effect  - jitters the ray origin over a disc when cam.DOF.x > 0.
 */
__global__ void generateRayFromCamera(
    Camera cam, int iter, int traceDepth, PathSegment* pathSegments,
    bool stochasticAA)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        PathSegment & segment = pathSegments[index];
        segment.ray.origin = cam.position;
        // throughput starts at white; shading multiplies it down per bounce
        segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
        if (!stochasticAA)
        {
            // deterministic ray through the pixel corner
            segment.ray.direction = glm::normalize(cam.view
                - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
                - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
            );
        }
        else
        {
            // stochastic antialiasing: jitter the sample point inside the pixel
            thrust::uniform_real_distribution<float> u01(0, 1);
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
            float dx = u01(rng);
            float dy = u01(rng);
            float fx = (float)x + dx;
            float fy = (float)y + dy;
            segment.ray.direction = glm::normalize(cam.view
                - cam.right * cam.pixelLength.x * ((float)fx - (float)cam.resolution.x * 0.5f)
                - cam.up * cam.pixelLength.y * ((float)fy - (float)cam.resolution.y * 0.5f)
            );
        }
        if (cam.DOF.x > 0.0f) // Depth of field
        {
            // NOTE(review): this engine is seeded identically to the AA engine
            // above (same iter/index/depth), so the first two lens draws equal
            // the pixel-jitter draws — the lens and AA samples are correlated.
            // Consider a different depth argument for decorrelated sampling.
            thrust::uniform_real_distribution<float> u01(0, 1);
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
            // sample a point on the lens; DOF.x is the aperture radius,
            // DOF.y the focal distance
            glm::vec2 lenUV = ConcentricSampleDisc(u01(rng), u01(rng));
            lenUV *= cam.DOF.x;
            float ft = glm::abs(cam.DOF.y / cam.view.z);
            glm::vec3 pfocus = segment.ray.direction * ft + segment.ray.origin;
            // move the origin onto the lens and re-aim at the focal point
            segment.ray.origin += lenUV.x * cam.right + lenUV.y * cam.up;
            segment.ray.direction = glm::normalize(pfocus - segment.ray.origin);
        }
        segment.pixelIndex = index;
        segment.remainingBounces = traceDepth;
    }
}
// Compute the nearest intersection for each live path segment and record it
// in `intersections` for the shading stage. One thread per path. Supports
// cube, sphere, and CSG primitives; motion blur jitters the intersection
// time per-geometry when enabled.
__global__ void pathTraceOneBounce(
    int iter,
    int depth,
    int num_paths,
    PathSegment * pathSegments,
    Geom * geoms,
    int geoms_size,
    Material * materials,
    int material_size,
    ShadeableIntersection * intersections,
    Geom csg_box, Geom csg_sphere
)
{
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (path_index < num_paths)
    {
        PathSegment pathSegment = pathSegments[path_index];
        // dead paths keep their previous intersection record untouched
        if (pathSegment.remainingBounces <= 0)
            return;
        // NOTE(review): `t` is only assigned inside the CUBE/SPHERE/CSG
        // branches below; a geom of any other type would compare a stale `t`.
        float t;
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;
        glm::vec2 uv = glm::vec2(-1, -1);
        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;
        glm::vec2 tmp_uv = glm::vec2(-1, -1);
        // naive linear scan through all geometry
        thrust::uniform_real_distribution<float> u01(0, 1);
        thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);
        // set by csgIntersectionTest when a CSG primitive is hit; only read
        // below when the winning geom is of type CSG
        int csgMaterialID;
        for (int i = 0; i < geoms_size; i++)
        {
            Geom & geom = geoms[i];
            // motion blur: pick a random time in [0,1); -1 disables it
            float u = geom.hasMotionBlur ? u01(rng) : -1;
            if (geom.type == CUBE)
            {
                t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, tmp_uv, u);
            }
            else if (geom.type == SPHERE)
            {
                t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, tmp_uv, u);
            }
            else if (geom.type == CSG)
            {
                t = csgIntersectionTest(geom, pathSegment.ray, csg_box, csg_sphere, tmp_intersect, tmp_normal, tmp_uv, csgMaterialID);
            }
            // keep the closest positive hit
            if (t > 0.0f && t_min > t)
            {
                t_min = t;
                hit_geom_index = i;
                intersect_point = tmp_intersect;
                normal = tmp_normal;
                uv = tmp_uv;
            }
        }
        if (hit_geom_index == -1)
        {
            // t = -1 signals "no hit" to the shading kernel
            intersections[path_index].t = -1.0f;
        }
        else
        {
            // The ray hits something: record the closest intersection.
            // CSG primitives carry their material id in csgMaterialID.
            intersections[path_index].t = t_min;
            intersections[path_index].materialId = (geoms[hit_geom_index].type == CSG ? csgMaterialID : geoms[hit_geom_index].materialid);
            intersections[path_index].surfaceNormal = normal;
            intersections[path_index].hit_geom_index = hit_geom_index;
            intersections[path_index].uv = uv;
        }
    }
}
/**
 * Shading function: one thread per path segment. Terminates paths that hit
 * a light (multiplying in the emitted radiance) or hit nothing (black);
 * otherwise scatters a new ray via the material's BSDF and decrements the
 * remaining bounce count.
 */
__global__ void shadingAndEvaluatingBSDF(
    int iter,
    int depth,
    int num_paths,
    ShadeableIntersection * shadeableIntersections,
    PathSegment * pathSegments,
    Geom * geoms,
    int geoms_size,
    Material * materials,
    glm::vec3** textures,
    glm::vec2* textureSizes,
    glm::vec3** normal_maps,
    glm::vec2* normal_mapSizes
)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_paths)
        return;
    ShadeableIntersection &isx = shadeableIntersections[idx];
    PathSegment &pathSeg = pathSegments[idx];
    if (isx.t > 0.0f)
    {
        Material &material = materials[isx.materialId];
        glm::vec3 color; // NOTE(review): unused local
        if (material.emittance > 0) // light source: terminate with emitted light
        {
            pathSeg.color *= material.color * material.emittance;
            pathSeg.remainingBounces = 0;
        }
        else // bounce ray off the surface
        {
            glm::vec3 intersectPoint = pathSeg.ray.origin + pathSeg.ray.direction * isx.t;
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
            // scatterRay updates pathSeg's ray and color in place
            scatterRay(pathSeg, intersectPoint, isx.surfaceNormal, material, textures, textureSizes, isx.uv, rng, normal_maps, normal_mapSizes, geoms[isx.hit_geom_index]);
            pathSeg.remainingBounces--;
        }
    }
    else // hit nothing: path contributes no light
    {
        pathSeg.color = glm::vec3(0);
        pathSeg.remainingBounces = 0;
    }
}
// Accumulate this iteration's per-path radiance into the overall image.
// One thread per path; writes are race-free because each path owns a
// distinct pixelIndex in this renderer's 1-path-per-pixel layout.
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= nPaths)
    {
        return;
    }
    PathSegment segment = iterationPaths[tid];
    image[segment.pixelIndex] += segment.color;
}
// Accumulate only the *terminated* path segments (remainingBounces <= 0)
// into the image; still-live segments are left for later bounces.
__global__ void kernUpdateTerminatedSegmentsToImage(
    int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= nPaths)
    {
        return;
    }
    if (iterationPaths[tid].remainingBounces <= 0)
    {
        PathSegment segment = iterationPaths[tid];
        image[segment.pixelIndex] += segment.color;
    }
}
/**
 * Predicate for thrust::remove_if: selects path segments that have no
 * bounces left and can be compacted away.
 */
struct terminate_path
{
    __host__ __device__
    bool operator()(const PathSegment & segment)
    {
        return segment.remainingBounces <= 0;
    }
};
// Extract each intersection's material id into two key buffers. Two copies
// are needed because thrust::sort_by_key consumes its key array and both
// dev_paths and dev_intersections must be sorted by the same keys.
__global__ void kernGetMaterialIDs(
    int nPaths, int * dev_materialIDs1, int * dev_materialIDs2, ShadeableIntersection * intersections)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= nPaths)
    {
        return;
    }
    const int matId = intersections[tid].materialId;
    dev_materialIDs1[tid] = matId;
    dev_materialIDs2[tid] = matId;
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
const bool reshuffleByMaterialIDs = hst_scene->state.reshuffleByMaterialIDs;
const bool useFirstBounceIntersectionCache = hst_scene->state.useFirstBounceIntersectionCache;
const bool stochasticAntialising = hst_scene->state.stochasticAntialiasing;
const bool useStreamCompaction = hst_scene->state.useStreamCompaction;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths, stochasticAntialising);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
int remainingNumPaths = num_paths;
dim3 numblocksPathSegmentTracing;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
numblocksPathSegmentTracing = (remainingNumPaths + blockSize1d - 1) / blockSize1d;
// first bounce caching related, so ugly
if (!useFirstBounceIntersectionCache ||
(useFirstBounceIntersectionCache && ((depth == 0 && iter == 1) || (depth > 0)))
)
{
// tracing
pathTraceOneBounce << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
depth,
remainingNumPaths,
dev_paths,
dev_geoms,
hst_scene->geoms.size(),
dev_materials,
hst_scene->materials.size(),
dev_intersections,
csg_box,csg_sphere
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
}
if (useFirstBounceIntersectionCache && (depth == 0 && iter == 1))
{
hipMemcpy(dev_first_bounce_intersections, dev_intersections, remainingNumPaths * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
}
if (useFirstBounceIntersectionCache && (depth == 0 && iter > 1))
{
hipMemcpy(dev_intersections, dev_first_bounce_intersections, remainingNumPaths * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
}
depth++;
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
if (reshuffleByMaterialIDs)
{
// after tracing one bounce , get materialIDs for reshuffling intersections and pathSegmetns
kernGetMaterialIDs << <numblocksPathSegmentTracing, blockSize1d >> >(
remainingNumPaths,
dev_materialIDs_1,
dev_materialIDs_2,
dev_intersections
);
thrust::sort_by_key(thrust::device, dev_materialIDs_1, dev_materialIDs_1 + remainingNumPaths, dev_paths);
thrust::sort_by_key(thrust::device, dev_materialIDs_2, dev_materialIDs_2 + remainingNumPaths, dev_intersections);
}
// shading and generate new directions using BSDF evaluation
shadingAndEvaluatingBSDF << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
depth,
remainingNumPaths,
dev_intersections,
dev_paths,
dev_geoms,
hst_scene->geoms.size(),
dev_materials,
dev_textures,
dev_textureSizes,
dev_normal_maps,
dev_normal_mapSizes
);
checkCUDAError("shading");
hipDeviceSynchronize();
if (useStreamCompaction)
{
// update terminated segments to final image
kernUpdateTerminatedSegmentsToImage << <numblocksPathSegmentTracing, blockSize1d >> >(
remainingNumPaths,
dev_image,
dev_paths
);
checkCUDAError("udpate terminated segments");
hipDeviceSynchronize();
// stream compaction, delete that paths that remainingBounces <= 0
//std::cout << "before compaction = " << remainingNumPaths;
PathSegment *newPathEnd = thrust::remove_if(thrust::device, dev_paths, dev_paths + remainingNumPaths, terminate_path());
if (newPathEnd != NULL)
{
remainingNumPaths = newPathEnd - dev_paths;
}
else
{
remainingNumPaths = 0;
}
checkCUDAError("thrust::remove_if");
std::cout << "Iteration #" << iter << " ==> After compaction, number of rays = " << remainingNumPaths << std::endl;
}
iterationComplete = (depth >= traceDepth || remainingNumPaths <= 0); // TODO: should be based off stream compaction results.
}
// Assemble this iteration and apply it to the image
if (!useStreamCompaction)
{
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);
}
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| 9bb81f255e49404a435ece54bfca7d918a6ac3a5.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
// Report the most recent CUDA error (if any) and abort the process.
// Synchronizes the device first so asynchronous kernel failures surface here.
// msg names the operation being checked; file/line identify the call site.
// Compiled to a no-op body when ERRORCHECK is 0.
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
    cudaDeviceSynchronize();
    const cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) {
        return;
    }
    fprintf(stderr, "CUDA error");
    if (file != NULL) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
#ifdef _WIN32
    getchar();  // keep the console window open before exiting
#endif
    exit(EXIT_FAILURE);
#endif
}
// Build a deterministic RNG seeded from (iteration, pixel index, bounce
// depth) so every bounce of every path in every iteration draws an
// independent but reproducible random stream.
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
    const int seed = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
    return thrust::default_random_engine(seed);
}
// Kernel that writes the image to the OpenGL PBO directly.
// Averages the accumulated radiance over `iter` iterations, clamps each
// channel to [0, 255], and stores the result as an opaque RGBA8 texel.
// Launched with a 2D grid covering the full resolution.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
        int iter, glm::vec3* image) {
    const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x >= resolution.x || y >= resolution.y) {
        return;
    }
    const int index = x + (y * resolution.x);
    const glm::vec3 pix = image[index];

    glm::ivec3 color;
    color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
    color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
    color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);

    // Each thread writes exactly one texel.
    pbo[index].w = 0;
    pbo[index].x = color.x;
    pbo[index].y = color.y;
    pbo[index].z = color.z;
}
// Host-side scene handle plus all device buffers. Allocated in
// pathtraceInit() and released in pathtraceFree().
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;            // accumulated radiance, one vec3 per pixel
static Geom * dev_geoms = NULL;                 // scene geometry, copied once per scene
static Material * dev_materials = NULL;         // scene materials, copied once per scene
static PathSegment * dev_paths = NULL;          // one path segment per pixel
static ShadeableIntersection * dev_intersections = NULL;  // per-path hit records for one bounce
// Two copies of the per-path material-id keys: each thrust::sort_by_key call
// permutes its key array, so sorting paths and intersections needs one each.
static int * dev_materialIDs_1 = NULL;
static int * dev_materialIDs_2 = NULL;
// first bounce intersections cache (reused across iterations at depth 0)
static ShadeableIntersection * dev_first_bounce_intersections = NULL;
// Texture / normal-map storage: device arrays of per-image pixel pointers
// plus their sizes.
glm::vec3 ** dev_textures = NULL;
glm::vec2 * dev_textureSizes = NULL;
glm::vec3 ** dev_normal_maps = NULL;
glm::vec2 * dev_normal_mapSizes = NULL;
// Allocate and fill every per-scene device buffer. Must be called once per
// scene before pathtrace(); pathtraceFree() releases what is allocated here.
void pathtraceInit(Scene *scene) {
    hst_scene = scene;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;
    // Accumulation image, cleared to black.
    cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
    cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
    // One path segment per pixel.
    cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
    // Scene geometry and materials, uploaded once.
    cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
    cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
    cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
    cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
    cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
    cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
    // Extra device memory: material-id key buffers and the first-bounce cache.
    cudaMalloc(&dev_materialIDs_1, pixelcount * sizeof(int));
    cudaMalloc(&dev_materialIDs_2, pixelcount * sizeof(int));
    cudaMalloc(&dev_first_bounce_intersections, pixelcount * sizeof(ShadeableIntersection));
    // Copy texture memory to device: one device pixel buffer per texture,
    // plus device arrays holding the buffer pointers and their sizes.
    // NOTE(review): the buffers allocated into `tmp` are never recorded on
    // the host, so pathtraceFree() cannot release them — they leak when the
    // scene is torn down.
    int textureSize = hst_scene->textures.size();
    cudaMalloc((void**)&dev_textures, textureSize * sizeof(glm::vec3*));
    cudaMalloc((void**)&dev_textureSizes, textureSize * sizeof(glm::vec2));
    std::vector<glm::vec3*> textures;
    std::vector<glm::vec2> textureSizes;
    glm::vec3* tmp;
    for (int i = 0; i < textureSize; ++i)
    {
        int texPixelCount = hst_scene->textures[i]->pixelCount();
        cudaMalloc((void**)&tmp, texPixelCount * sizeof(glm::vec3));
        cudaMemcpy(tmp, hst_scene->textures[i]->pixels, texPixelCount * sizeof(glm::vec3), cudaMemcpyHostToDevice);
        textures.push_back(tmp);
        textureSizes.push_back(hst_scene->textures[i]->getSize());
    }
    cudaMemcpy(dev_textures, textures.data(), textureSize * sizeof(glm::vec3*), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_textureSizes, textureSizes.data(), textureSize * sizeof(glm::vec2), cudaMemcpyHostToDevice);
    // Copy normal maps into device memory (same layout and same leak caveat
    // as the textures above).
    int normalMapSize = hst_scene->normalMaps.size();
    cudaMalloc((void**)&dev_normal_maps, normalMapSize * sizeof(glm::vec3*));
    cudaMalloc((void**)&dev_normal_mapSizes, normalMapSize * sizeof(glm::vec2));
    std::vector<glm::vec3*> normal_maps;
    std::vector<glm::vec2> normal_map_sizes;
    for (int i = 0; i < normalMapSize; ++i)
    {
        int texPixelCount = hst_scene->normalMaps[i]->pixelCount();
        cudaMalloc((void**)&tmp, texPixelCount * sizeof(glm::vec3));
        cudaMemcpy(tmp, hst_scene->normalMaps[i]->pixels, texPixelCount * sizeof(glm::vec3), cudaMemcpyHostToDevice);
        normal_maps.push_back(tmp);
        normal_map_sizes.push_back(hst_scene->normalMaps[i]->getSize());
    }
    cudaMemcpy(dev_normal_maps, normal_maps.data(), normalMapSize * sizeof(glm::vec3*), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_normal_mapSizes, normal_map_sizes.data(), normalMapSize * sizeof(glm::vec2), cudaMemcpyHostToDevice);
    checkCUDAError("pathtraceInit");
}
// Release the device buffers allocated by pathtraceInit().
void pathtraceFree() {
    cudaFree(dev_image); // no-op if dev_image is null
    cudaFree(dev_paths);
    cudaFree(dev_geoms);
    cudaFree(dev_materials);
    cudaFree(dev_intersections);
    // Extra device memory created in pathtraceInit().
    cudaFree(dev_materialIDs_1);
    cudaFree(dev_materialIDs_2);
    cudaFree(dev_first_bounce_intersections);
    // Free texture memory on device.
    // NOTE(review): only the pointer/size arrays are freed; the per-texture
    // pixel buffers allocated in pathtraceInit() are leaked because their
    // device pointers were never kept on the host.
    cudaFree(dev_textures);
    cudaFree(dev_textureSizes);
    // free normal map memory (same caveat as the textures above)
    cudaFree(dev_normal_maps);
    cudaFree(dev_normal_mapSizes);
    // check error
    checkCUDAError("pathtraceFree");
}
/**
 * Helper function for DOF: maps a uniform sample in [0,1)^2 onto the unit
 * disc with low distortion (Shirley's concentric mapping).
 * reference : http://psgraphics.blogspot.com/2011/01/improved-code-for-concentric-map.html
 *
 * @param u1, u2  uniform random samples in [0, 1)
 * @return point on the unit disc
 */
__device__ glm::vec2 ConcentricSampleDisc(float u1, float u2)
{
    // Remap [0,1)^2 to the square [-1,1]^2.
    float a = 2.0f * u1 - 1.0f;
    float b = 2.0f * u2 - 1.0f;

    // Degenerate center sample: the previous code divided by zero here
    // (b / a with a == b == 0), yielding NaN directions. Map it to the
    // disc center instead.
    if (a == 0.0f && b == 0.0f)
    {
        return glm::vec2(0.0f, 0.0f);
    }

    float phi, r;
    if (a * a > b * b)
    {
        r = a;
        phi = (PI / 4.0f) * (b / a);
    }
    else
    {
        r = b;
        phi = (PI / 2.0f) - (PI / 4.0f) * (a / b);
    }
    return glm::vec2(r * glm::cos(phi), r * glm::sin(phi));
}
/**
 * Generate PathSegments with rays from the camera through the screen into the
 * scene, which is the first bounce of rays.
 *
 * Antialiasing - jitter the sample position inside each pixel (stochasticAA)
 * lens effect  - jitter ray origin positions on a lens (cam.DOF.x > 0)
 *
 * Launched with a 2D grid covering the camera resolution; writes one
 * PathSegment per pixel with full throughput (white) and traceDepth bounces.
 */
__global__ void generateRayFromCamera(
    Camera cam, int iter, int traceDepth, PathSegment* pathSegments,
    bool stochasticAA)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        PathSegment & segment = pathSegments[index];
        segment.ray.origin = cam.position;
        // Throughput starts at the multiplicative identity (white).
        segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
        //segment.color = glm::vec3(0, 0, 0);
        if (!stochasticAA)
        {
            // Deterministic ray through the pixel's fixed sample position.
            segment.ray.direction = glm::normalize(cam.view
                - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
                - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
            );
        }
        else
        {
            // Stochastic antialiasing: jitter the sample inside the pixel,
            // reseeded per iteration so samples accumulate over iterations.
            thrust::uniform_real_distribution<float> u01(0, 1);
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
            float dx = u01(rng);
            float dy = u01(rng);
            float fx = (float)x + dx;
            float fy = (float)y + dy;
            segment.ray.direction = glm::normalize(cam.view
                - cam.right * cam.pixelLength.x * ((float)fx - (float)cam.resolution.x * 0.5f)
                - cam.up * cam.pixelLength.y * ((float)fy - (float)cam.resolution.y * 0.5f)
            );
        }
        if (cam.DOF.x > 0.0f) // Depth of field (DOF.x scales the lens sample; DOF.y drives the focal distance)
        {
            // NOTE(review): this engine reuses the exact seed of the AA
            // engine above, so the lens sample repeats the pixel-jitter
            // random numbers — consider a different depth argument; verify.
            thrust::uniform_real_distribution<float> u01(0, 1);
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
            glm::vec2 lenUV = ConcentricSampleDisc(u01(rng), u01(rng));
            lenUV *= cam.DOF.x;
            float ft = glm::abs(cam.DOF.y / cam.view.z);
            glm::vec3 pfocus = segment.ray.direction * ft + segment.ray.origin;
            // Offset the origin on the lens disc and re-aim at the focal point.
            segment.ray.origin += lenUV.x * cam.right + lenUV.y * cam.up;
            segment.ray.direction = glm::normalize(pfocus - segment.ray.origin);
        }
        segment.pixelIndex = index;
        segment.remainingBounces = traceDepth;
    }
}
// pathTraceOneBounce handles one bounce of ray/scene intersection: for every
// live path it scans all geometry, keeps the nearest positive hit, and
// records it (parametric distance t, material id, normal, uv, geom index)
// into `intersections`. t = -1.0 in the output marks "no intersection".
// Launched 1D with >= num_paths threads. csg_box/csg_sphere are the proxy
// primitives consumed by csgIntersectionTest.
__global__ void pathTraceOneBounce(
    int iter,
    int depth,
    int num_paths,
    PathSegment * pathSegments,
    Geom * geoms,
    int geoms_size,
    Material * materials,
    int material_size,
    ShadeableIntersection * intersections,
    Geom csg_box, Geom csg_sphere
    )
{
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (path_index >= num_paths)
        return;

    PathSegment pathSegment = pathSegments[path_index];
    if (pathSegment.remainingBounces <= 0)
        return; // terminated path: leave its intersection slot untouched

    glm::vec3 intersect_point;
    glm::vec3 normal;
    float t_min = FLT_MAX;
    int hit_geom_index = -1;
    bool outside = true;
    glm::vec2 uv = glm::vec2(-1, -1);
    int csgMaterialID = -1;        // material of the CSG sub-shape actually hit

    glm::vec3 tmp_intersect;
    glm::vec3 tmp_normal;
    glm::vec2 tmp_uv = glm::vec2(-1, -1);
    int tmp_csgMaterialID = -1;

    // RNG used to jitter motion-blurred geometry "in time".
    thrust::uniform_real_distribution<float> u01(0, 1);
    thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_index, depth);

    // Naive linear scan over all geometry: keep the closest positive hit.
    for (int i = 0; i < geoms_size; i++)
    {
        Geom & geom = geoms[i];
        float u = geom.hasMotionBlur ? u01(rng) : -1;
        // Reset every iteration. Previously `t` kept the value of the last
        // tested primitive when a geom had an unhandled type, which could
        // register a bogus hit for that geom.
        float t = -1.0f;
        if (geom.type == CUBE)
        {
            t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, tmp_uv, u);
        }
        else if (geom.type == SPHERE)
        {
            t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, tmp_uv, u);
        }
        else if (geom.type == CSG)
        {
            t = csgIntersectionTest(geom, pathSegment.ray, csg_box, csg_sphere, tmp_intersect, tmp_normal, tmp_uv, tmp_csgMaterialID);
        }
        // TODO: add more intersection tests here... triangle? metaball?
        // Keep the minimum-t hit seen so far.
        if (t > 0.0f && t_min > t)
        {
            t_min = t;
            hit_geom_index = i;
            intersect_point = tmp_intersect;
            normal = tmp_normal;
            uv = tmp_uv;
            // Capture the CSG material at hit time. Previously the material
            // of the last CSG *tested* (not necessarily the one hit) won,
            // and it was read uninitialized when no CSG preceded the hit.
            csgMaterialID = tmp_csgMaterialID;
        }
    }

    if (hit_geom_index == -1)
    {
        intersections[path_index].t = -1.0f; // miss
    }
    else
    {
        // The ray hits something: record the nearest intersection.
        intersections[path_index].t = t_min;
        intersections[path_index].materialId = (geoms[hit_geom_index].type == CSG ? csgMaterialID : geoms[hit_geom_index].materialid);
        intersections[path_index].surfaceNormal = normal;
        intersections[path_index].hit_geom_index = hit_geom_index;
        intersections[path_index].uv = uv;
    }
}
/**
 * Shading function: one thread per path segment.
 * - Hit an emitter: multiply throughput by emitted radiance and terminate.
 * - Hit a surface: scatter a continuation ray via the material's BSDF
 *   (scatterRay) and decrement the bounce budget.
 * - Hit nothing (t <= 0): set the path black and terminate.
 * Updates pathSegments in place.
 */
__global__ void shadingAndEvaluatingBSDF(
    int iter,
    int depth,
    int num_paths,
    ShadeableIntersection * shadeableIntersections,
    PathSegment * pathSegments,
    Geom * geoms,
    int geoms_size,
    Material * materials,
    glm::vec3** textures,
    glm::vec2* textureSizes,
    glm::vec3** normal_maps,
    glm::vec2* normal_mapSizes
    )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_paths)
        return;
    ShadeableIntersection &isx = shadeableIntersections[idx];
    PathSegment &pathSeg = pathSegments[idx];
    if (isx.t > 0.0f)
    {
        Material &material = materials[isx.materialId];
        glm::vec3 color;
        if (material.emittance > 0) // light source: accumulate and stop
        {
            pathSeg.color *= material.color * material.emittance;
            pathSeg.remainingBounces = 0;
        }
        else // bounce ray
        {
            // Reconstruct the hit point from the parametric distance, then
            // let scatterRay update the throughput and spawn the new ray
            // (textures/normal maps are passed through for it to sample).
            glm::vec3 intersectPoint = pathSeg.ray.origin + pathSeg.ray.direction * isx.t;
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
            scatterRay(pathSeg, intersectPoint, isx.surfaceNormal, material, textures, textureSizes, isx.uv, rng, normal_maps, normal_mapSizes, geoms[isx.hit_geom_index]);
            pathSeg.remainingBounces--;
        }
    }
    else // hit nothing
    {
        pathSeg.color = glm::vec3(0);
        pathSeg.remainingBounces = 0;
    }
}
// Add the current iteration's output to the overall image: each thread folds
// one path's accumulated color into its pixel.
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= nPaths)
        return;
    const PathSegment &segment = iterationPaths[idx];
    image[segment.pixelIndex] += segment.color;
}
// Accumulate only the terminated paths (no bounces left) into the image.
// Called before stream compaction so their contribution is not lost when
// they are removed; live paths are skipped.
__global__ void kernUpdateTerminatedSegmentsToImage(
    int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= nPaths)
        return;
    if (iterationPaths[idx].remainingBounces > 0)
        return;
    const PathSegment &segment = iterationPaths[idx];
    image[segment.pixelIndex] += segment.color;
}
/**
 * Predicate for thrust::remove_if: true for path segments whose bounce
 * budget is exhausted and which should be compacted away.
 */
struct terminate_path
{
    __host__ __device__
    bool operator()(const PathSegment & segment)
    {
        return segment.remainingBounces <= 0;
    }
};
// Copy each intersection's material id into two parallel key arrays so the
// paths and the intersections can each be sorted by material with their own
// key buffer (thrust::sort_by_key permutes its keys).
__global__ void kernGetMaterialIDs(
    int nPaths, int * dev_materialIDs1, int * dev_materialIDs2, ShadeableIntersection * intersections)
{
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx < nPaths)
    {
        const int materialID = intersections[idx].materialId;
        dev_materialIDs1[idx] = materialID;
        dev_materialIDs2[idx] = materialID;
    }
}
/**
 * Wrapper for the __global__ call that sets up the kernel calls and does a ton
 * of memory management. Performs ONE iteration (one sample per pixel):
 * camera rays -> repeated {intersect, shade} up to traceDepth bounces, with
 * optional first-bounce caching, material-id sorting, and stream compaction,
 * then accumulates the iteration into dev_image, displays it via the PBO,
 * and copies it back to the host scene state.
 */
void pathtrace(uchar4 *pbo, int frame, int iter) {
    const int traceDepth = hst_scene->state.traceDepth;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;
    // Feature toggles read from the scene state.
    const bool reshuffleByMaterialIDs = hst_scene->state.reshuffleByMaterialIDs;
    const bool useFirstBounceIntersectionCache = hst_scene->state.useFirstBounceIntersectionCache;
    const bool stochasticAntialising = hst_scene->state.stochasticAntialiasing;
    const bool useStreamCompaction = hst_scene->state.useStreamCompaction;
    // 2D block for generating rays from the camera.
    const dim3 blockSize2d(8, 8);
    const dim3 blocksPerGrid2d(
        (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
        (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
    // 1D block for path tracing.
    const int blockSize1d = 128;
    // Generate the primary rays: one PathSegment per pixel, white throughput.
    generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths, stochasticAntialising);
    checkCUDAError("generate camera ray");
    int depth = 0;
    PathSegment* dev_path_end = dev_paths + pixelcount;
    int num_paths = dev_path_end - dev_paths;
    // --- PathSegment Tracing Stage ---
    // Shoot rays into the scene and bounce between objects until every path
    // terminates or traceDepth is reached.
    bool iterationComplete = false;
    int remainingNumPaths = num_paths;
    dim3 numblocksPathSegmentTracing;
    while (!iterationComplete) {
        // Clear last bounce's intersection records.
        cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
        numblocksPathSegmentTracing = (remainingNumPaths + blockSize1d - 1) / blockSize1d;
        // First-bounce caching: after iteration 1, depth 0 skips tracing and
        // reuses the cached primary-hit intersections copied in below.
        if (!useFirstBounceIntersectionCache ||
            (useFirstBounceIntersectionCache && ((depth == 0 && iter == 1) || (depth > 0)))
            )
        {
            // Tracing. NOTE(review): csg_box/csg_sphere are not declared in
            // this translation unit — presumably defined elsewhere; verify.
            pathTraceOneBounce << <numblocksPathSegmentTracing, blockSize1d >> > (
                iter,
                depth,
                remainingNumPaths,
                dev_paths,
                dev_geoms,
                hst_scene->geoms.size(),
                dev_materials,
                hst_scene->materials.size(),
                dev_intersections,
                csg_box,csg_sphere
                );
            checkCUDAError("trace one bounce");
            cudaDeviceSynchronize();
        }
        // Fill the first-bounce cache on iteration 1, reuse it afterwards.
        // NOTE(review): the cache is only consistent when the primary rays
        // are not jittered per iteration (AA/DOF off) — confirm the toggles
        // are kept mutually exclusive by the scene loader.
        if (useFirstBounceIntersectionCache && (depth == 0 && iter == 1))
        {
            cudaMemcpy(dev_first_bounce_intersections, dev_intersections, remainingNumPaths * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
        }
        if (useFirstBounceIntersectionCache && (depth == 0 && iter > 1))
        {
            cudaMemcpy(dev_intersections, dev_first_bounce_intersections, remainingNumPaths * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
        }
        depth++;
        // --- Shading Stage ---
        // Optionally reshuffle paths and intersections so segments sharing a
        // material are contiguous in memory before shading.
        if (reshuffleByMaterialIDs)
        {
            // Two key copies: each sort_by_key permutes its own key array.
            kernGetMaterialIDs << <numblocksPathSegmentTracing, blockSize1d >> >(
                remainingNumPaths,
                dev_materialIDs_1,
                dev_materialIDs_2,
                dev_intersections
                );
            thrust::sort_by_key(thrust::device, dev_materialIDs_1, dev_materialIDs_1 + remainingNumPaths, dev_paths);
            thrust::sort_by_key(thrust::device, dev_materialIDs_2, dev_materialIDs_2 + remainingNumPaths, dev_intersections);
        }
        // Shade and generate new ray directions via BSDF evaluation.
        shadingAndEvaluatingBSDF << <numblocksPathSegmentTracing, blockSize1d >> > (
            iter,
            depth,
            remainingNumPaths,
            dev_intersections,
            dev_paths,
            dev_geoms,
            hst_scene->geoms.size(),
            dev_materials,
            dev_textures,
            dev_textureSizes,
            dev_normal_maps,
            dev_normal_mapSizes
            );
        checkCUDAError("shading");
        cudaDeviceSynchronize();
        if (useStreamCompaction)
        {
            // Terminated segments must be accumulated into the image BEFORE
            // compaction removes them.
            kernUpdateTerminatedSegmentsToImage << <numblocksPathSegmentTracing, blockSize1d >> >(
                remainingNumPaths,
                dev_image,
                dev_paths
                );
            checkCUDAError("udpate terminated segments");
            cudaDeviceSynchronize();
            // Stream compaction: drop paths with remainingBounces <= 0.
            PathSegment *newPathEnd = thrust::remove_if(thrust::device, dev_paths, dev_paths + remainingNumPaths, terminate_path());
            if (newPathEnd != NULL)
            {
                remainingNumPaths = newPathEnd - dev_paths;
            }
            else
            {
                remainingNumPaths = 0;
            }
            checkCUDAError("thrust::remove_if");
            std::cout << "Iteration #" << iter << " ==> After compaction, number of rays = " << remainingNumPaths << std::endl;
        }
        // Done when the bounce budget is spent or every path terminated.
        iterationComplete = (depth >= traceDepth || remainingNumPaths <= 0);
    }
    // Assemble this iteration and apply it to the image. Without compaction
    // all paths survive to this point and are gathered here; with compaction
    // each path was already accumulated when it terminated.
    if (!useStreamCompaction)
    {
        dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
        finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);
    }
    ///////////////////////////////////////////////////////////////////////////
    // Send results to OpenGL buffer for rendering.
    sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image);
    // Retrieve image from GPU.
    cudaMemcpy(hst_scene->state.image.data(), dev_image,
        pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
    checkCUDAError("pathtrace");
}
|
804c623a1ffc01a89b38eef54ca3540c2f7b0d14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// MaxPoolForward: one thread per output element; the flat index decodes to
// (n, c, ph, pw). Scans the kernel window (clipped to the input plane) and
// writes the maximum value. The argmax — flattened as h * width + w within
// the channel plane — is stored in `mask` when given, otherwise in
// `top_mask` (as a Dtype), matching what MaxPoolBackward reads.
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    Dtype* const top_data, int* mask, Dtype* top_mask) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decode the flat output index into (n, c, ph, pw).
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    // Window bounds in input coordinates, clipped to the image.
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    const int hend = min(hstart + kernel_h, height);
    const int wend = min(wstart + kernel_w, width);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    Dtype maxval = -FLT_MAX;
    int maxidx = -1;
    // Pointer to this (n, c) channel plane.
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        if (bottom_slice[h * width + w] > maxval) {
          maxidx = h * width + w;
          maxval = bottom_slice[maxidx];
        }
      }
    }
    top_data[index] = maxval;
    if (mask) {
      mask[index] = maxidx;
    }
    else {
      top_mask[index] = maxidx;
    }
  }
}
// AvePoolForward: one thread per output element. Averages the kernel window.
// Note the divisor `pool_size` is computed against the padded extent
// (height + pad_h / width + pad_w) BEFORE clipping to the real image, so
// padded positions count as zeros in the average (Caffe's AVE semantics).
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    Dtype* const top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decode the flat output index into (n, c, ph, pw).
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    // Divisor measured over the padded window...
    int hend = min(hstart + kernel_h, height + pad_h);
    int wend = min(wstart + kernel_w, width + pad_w);
    const int pool_size = (hend - hstart) * (wend - wstart);
    // ...then clip the actual summation range to the real image.
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    Dtype aveval = 0;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        aveval += bottom_slice[h * width + w];
      }
    }
    top_data[index] = aveval / pool_size;
  }
}
// StoPoolForwardTrain: stochastic pooling, training mode. One thread per
// output element; no padding is applied. Pass 1 sums the window; the
// pre-drawn uniform sample in rand_idx[index] scales that sum into a
// threshold. Pass 2 re-walks the window and selects the first element whose
// running sum reaches the threshold, i.e. each input is picked with
// probability proportional to its value. rand_idx[index] is overwritten
// with the selected input's flat index (used by the backward pass).
// NOTE(review): the cumulative-sum selection presumes non-negative
// activations — verify this kernel only follows ReLU-like layers.
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
    const Dtype* const bottom_data,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decode the flat output index into (n, c, ph, pw).
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    const int hstart = ph * stride_h;
    const int hend = min(hstart + kernel_h, height);
    const int wstart = pw * stride_w;
    const int wend = min(wstart + kernel_w, width);
    Dtype cumsum = 0.;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    // First pass: get sum
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        cumsum += bottom_slice[h * width + w];
      }
    }
    const float thres = rand_idx[index] * cumsum;
    // Second pass: get value, and set index.
    cumsum = 0;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        cumsum += bottom_slice[h * width + w];
        if (cumsum >= thres) {
          // Record the winner's flat index for backward, output its value.
          rand_idx[index] = ((n * channels + c) * height + h) * width + w;
          top_data[index] = bottom_slice[h * width + w];
          return;
        }
      }
    }
  }
}
// StoPoolForwardTest: stochastic pooling, inference mode. Instead of
// sampling, outputs the probability-weighted average of the window,
// sum(x^2) / sum(x) — the expectation of the training-time sampling.
// cumsum is seeded with FLT_MIN so an all-zero window does not divide by 0.
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
    const Dtype* const bottom_data,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, Dtype* const top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decode the flat output index into (n, c, ph, pw).
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    const int hstart = ph * stride_h;
    const int hend = min(hstart + kernel_h, height);
    const int wstart = pw * stride_w;
    const int wend = min(wstart + kernel_w, width);
    // We set cumsum to be 0 to avoid divide-by-zero problems
    Dtype cumsum = FLT_MIN;
    Dtype cumvalues = 0.;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    // Single pass accumulating both the sum and the value-weighted sum.
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        cumsum += bottom_slice[h * width + w];
        cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
      }
    }
    top_data[index] = cumvalues / cumsum;
  }
}
// Forward pass dispatcher: unpacks the layer's cached shape / kernel /
// stride / pad parameters and launches the pooling kernel matching the
// configured pool method (MAX, AVE, or STOCHASTIC with separate
// train / test paths), one thread per output element.
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int count = top[0]->count();
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  int* mask = NULL;
  Dtype* top_mask = NULL;
  // Cached NCHW shapes: input/pooled spatial dims and window geometry.
  const int height_ = input_shape_[2];
  const int width_ = input_shape_[3];
  const int pooled_height_ = pooled_shape_[2];
  const int pooled_width_ = pooled_shape_[3];
  const int kernel_h_ = kernel_shape_[0];
  const int kernel_w_ = kernel_shape_[1];
  const int pad_h_ = pad_[0];
  const int pad_w_ = pad_[1];
  const int stride_h_ = stride_[0];
  const int stride_w_ = stride_[1];
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    // Argmax goes to the explicit top blob when present, else to the
    // layer's internal buffer (consumed by Backward).
    if (use_top_mask) {
      top_mask = top[1]->mutable_gpu_data();
    }
    else {
      mask = max_idx_.mutable_gpu_data();
    }
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxPoolForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
        count, bottom_data, bottom[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_h_,
        kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
        mask, top_mask);
    break;
  case PoolingParameter_PoolMethod_AVE:
    // NOLINT_NEXT_LINE(whitespace/operators)
    AvePoolForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
        count, bottom_data, bottom[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_h_,
        kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    if (this->phase_ == TRAIN) {
      // We need to create the random index as well: one uniform sample in
      // [0, 1) per output element, consumed (and overwritten) by the kernel.
      caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
          rand_idx_.mutable_gpu_data());
      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTrain<Dtype> << <CAFFE_GET_BLOCKS(count),
          CAFFE_CUDA_NUM_THREADS >> >(
          count, bottom_data, bottom[0]->num(), channels_,
          height_, width_, pooled_height_, pooled_width_, kernel_h_,
          kernel_w_, stride_h_, stride_w_,
          rand_idx_.mutable_gpu_data(), top_data);
    }
    else {
      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTest<Dtype> << <CAFFE_GET_BLOCKS(count),
          CAFFE_CUDA_NUM_THREADS >> >(
          count, bottom_data, bottom[0]->num(), channels_,
          height_, width_, pooled_height_, pooled_width_, kernel_h_,
          kernel_w_, stride_h_, stride_w_, top_data);
    }
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}
// MaxPoolBackward: one thread per *input* element (gather formulation — no
// atomics needed). Each input (h, w) computes the range of pooled outputs
// whose windows could contain it, then accumulates top_diff from every
// output whose recorded argmax (int `mask` or Dtype `top_mask`, matching
// Forward) equals this input's flat within-plane index.
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
    const int* const mask, const Dtype* const top_mask, const int num,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, const int kernel_h,
    const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
    const int pad_w, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decode the flat input index into (n, c, h, w).
    const int w = index % width;
    const int h = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    // Range of pooled outputs whose windows may cover (h, w).
    const int phstart =
        (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
    const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
    const int pwstart =
        (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
    const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    // Offset into this (n, c) channel plane of the pooled tensors.
    const int offset = (n * channels + c) * pooled_height * pooled_width;
    const Dtype* const top_diff_slice = top_diff + offset;
    if (mask) {
      const int* const mask_slice = mask + offset;
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          // Only outputs that selected this input contribute gradient.
          if (mask_slice[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff_slice[ph * pooled_width + pw];
          }
        }
      }
    }
    else {
      const Dtype* const top_mask_slice = top_mask + offset;
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff_slice[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Backward pass for AVE pooling: each thread owns one bottom element and
// gathers top_diff / pool_size from every pooled window that covers it.
// pool_size is the padded window area (Caffe convention), so padding
// cells dilute the gradient exactly as they diluted the forward average.
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat bottom index into (n, c, h, w), shifted into
// padded coordinates so the window-range math matches the forward pass.
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
// Range of pooled outputs whose window covers (h, w).
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// Recompute this window's (padded) pooling size, as in the forward pass.
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
// Backward pass for STOCHASTIC pooling (training): rand_idx holds, per
// pooled output, the flat global index of the bottom element sampled in
// the forward pass.  Each thread gathers gradient from every window
// whose stored index equals its own global index.
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat bottom index into (n, c, h, w); no padding for
// stochastic pooling.
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
// Range of pooled outputs whose window could contain (h, w).
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// The comparison is branchless: multiply by 0/1 instead of an if.
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
// Host-side backward dispatcher: zero-fills bottom_diff, reads the layer
// geometry (shapes, kernel, pad, stride), then launches the kernel that
// matches the configured pooling method.  One thread per bottom element.
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// Nothing to do if the bottom blob does not need gradients.
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// Initialize the gradient buffer before the kernels accumulate into it.
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
// Unpack spatial geometry (NCHW layout: indices 2/3 are H/W).
const int height_ = input_shape_[2];
const int width_ = input_shape_[3];
const int pooled_height_ = pooled_shape_[2];
const int pooled_width_ = pooled_shape_[3];
const int kernel_h_ = kernel_shape_[0];
const int kernel_w_ = kernel_shape_[1];
const int pad_h_ = pad_[0];
const int pad_w_ = pad_[1];
const int stride_h_ = stride_[0];
const int stride_w_ = stride_[1];
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// Use the exported mask (top[1]) when present, else the internal one.
if (use_top_mask) {
top_mask = top[1]->gpu_data();
}
else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// rand_idx_ holds the forward pass's sampled indices.
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
// Surface asynchronous launch/execution errors.
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
| 804c623a1ffc01a89b38eef54ca3540c2f7b0d14.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward pass for MAX pooling: one thread per pooled output element.
// Scans the (clipped) pooling window, writes the max value to top_data
// and the argmax (flat index within the channel slice) to exactly one of
// `mask` (int, internal) or `top_mask` (Dtype, exported via top[1]).
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat pooled index into (n, c, ph, pw).
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
// Window bounds, clipped to the (unpadded) input extent.
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
// Record the argmax for the backward pass.
if (mask) {
mask[index] = maxidx;
}
else {
top_mask[index] = maxidx;
}
}
}
// Forward pass for AVE pooling: one thread per pooled output element.
// Averages the window, dividing by the PADDED window area (pool_size is
// computed before clipping to the input) — Caffe's convention, so border
// outputs are diluted by padding cells.
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat pooled index into (n, c, ph, pw).
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
// Padded bounds fix the divisor; then clip to the real input extent.
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
// Forward pass for STOCHASTIC pooling (training): one thread per pooled
// output.  rand_idx arrives holding uniform [0,1) samples; each thread
// samples one window element with probability proportional to its value,
// then overwrites rand_idx[index] with the chosen element's flat global
// index (consumed by StoPoolBackward).  Assumes non-negative activations.
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat pooled index into (n, c, ph, pw); no padding here.
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
// Early exit from the whole thread: safe because
// CAFFE_GET_BLOCKS(count) launches enough threads that the
// grid-stride loop runs at most one iteration per thread.
return;
}
}
}
}
}
// Forward pass for STOCHASTIC pooling (inference): instead of sampling,
// outputs the probability-weighted average sum(x^2)/sum(x) over the
// window — the expected value of the training-time sampling.
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat pooled index into (n, c, ph, pw); no padding here.
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
// (FLT_MIN is the smallest positive normal float, not zero).
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// Single pass accumulates sum(x) and sum(x^2).
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
// Host-side forward dispatcher: reads the layer geometry, then launches
// the kernel for the configured pooling method.  One thread per pooled
// output element.
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
// Unpack spatial geometry (NCHW layout: indices 2/3 are H/W).
const int height_ = input_shape_[2];
const int width_ = input_shape_[3];
const int pooled_height_ = pooled_shape_[2];
const int pooled_width_ = pooled_shape_[3];
const int kernel_h_ = kernel_shape_[0];
const int kernel_w_ = kernel_shape_[1];
const int pad_h_ = pad_[0];
const int pad_w_ = pad_[1];
const int stride_h_ = stride_[0];
const int stride_w_ = stride_[1];
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// Export the argmax through top[1] when requested, else keep it
// in the internal max_idx_ buffer for the backward pass.
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
}
else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
}
else {
// Inference uses the deterministic expected-value formulation.
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
// Surface asynchronous launch/execution errors.
CUDA_POST_KERNEL_CHECK;
}
// Backward pass for MAX pooling: each thread owns one bottom (input)
// element and gathers gradient from every pooled output whose recorded
// argmax equals this element's position within its channel slice.
// Exactly one of `mask` (int indices, internal buffer) or `top_mask`
// (Dtype indices, exported through top[1]) is non-NULL.
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat bottom index into (n, c, h, w).
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
// Range of pooled outputs whose window could contain (h, w).
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
// Offset of this (n, c) channel slice in the pooled layout.
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// Accumulate only where this element was the window's max.
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
else {
// Same gather, but indices were stored as Dtype in the exported mask.
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
// Backward pass for AVE pooling: each thread owns one bottom element and
// gathers top_diff / pool_size from every pooled window that covers it.
// pool_size is the padded window area (Caffe convention), so padding
// cells dilute the gradient exactly as they diluted the forward average.
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat bottom index into (n, c, h, w), shifted into
// padded coordinates so the window-range math matches the forward pass.
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
// Range of pooled outputs whose window covers (h, w).
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// Recompute this window's (padded) pooling size, as in the forward pass.
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
// Backward pass for STOCHASTIC pooling (training): rand_idx holds, per
// pooled output, the flat global index of the bottom element sampled in
// the forward pass.  Each thread gathers gradient from every window
// whose stored index equals its own global index.
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat bottom index into (n, c, h, w); no padding for
// stochastic pooling.
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
// Range of pooled outputs whose window could contain (h, w).
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// The comparison is branchless: multiply by 0/1 instead of an if.
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
// Host-side backward dispatcher: zero-fills bottom_diff, reads the layer
// geometry (shapes, kernel, pad, stride), then launches the kernel that
// matches the configured pooling method.  One thread per bottom element.
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// Nothing to do if the bottom blob does not need gradients.
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// Initialize the gradient buffer before the kernels accumulate into it.
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
// Unpack spatial geometry (NCHW layout: indices 2/3 are H/W).
const int height_ = input_shape_[2];
const int width_ = input_shape_[3];
const int pooled_height_ = pooled_shape_[2];
const int pooled_width_ = pooled_shape_[3];
const int kernel_h_ = kernel_shape_[0];
const int kernel_w_ = kernel_shape_[1];
const int pad_h_ = pad_[0];
const int pad_w_ = pad_[1];
const int stride_h_ = stride_[0];
const int stride_w_ = stride_[1];
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// Use the exported mask (top[1]) when present, else the internal one.
if (use_top_mask) {
top_mask = top[1]->gpu_data();
}
else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// rand_idx_ holds the forward pass's sampled indices.
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
// Surface asynchronous launch/execution errors.
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
scan_efficient.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _SCAN_WORKEFFICIENT_KERNEL_H_
#define _SCAN_WORKEFFICIENT_KERNEL_H_
#
#include "local_macros.h"
#define TIDX (__mul24(blockIdx.x,blockDim.x) + threadIdx.x)
#define TIDY (__mul24(blockIdx.y,blockDim.y) + threadIdx.y)
#define TWIDTH (__mul24(gridDim.x,blockDim.x))
#define THEIGHT (__mul24(gridDim.y,blockDim.y))
#define ArrayID (TIDY*TWIDTH+TIDX)
#define MAKE_FLOAT4(arg) make_float4((arg), (arg), (arg), (arg))
#define MAKE_INT4(arg) make_int4((arg).x, (arg).y, (arg).z, (arg).w);
// Written by NVidia
// Modified by Gordon Erlebacher, Feb. 21, 2008
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// Per-block reduction over an (edge x edge) tile of the flat texture
// centred on this block's seed.  Each thread loads two texels (one from
// the top half of the tile, one mirrored from the bottom), keeps only
// texels whose label (.w) matches the seed's label, and a work-efficient
// up-sweep in shared memory accumulates (sum_x, sum_y, -, count).
//
// g_idata : flat 2D texture, width x width float4 texels (.w = label)
// sum     : one float4 per block; (.x, .y) = mean position / width,
//           .w = number of matching texels (1 when the tile was empty)
// seeds   : one int4 per block; (.x, .y) = tile centre, .w = label
// n       : number of shared-memory slots reduced; assumes
//           n == 2 * blockDim.x * blockDim.y and n a power of two
//           -- TODO confirm with caller
// edge    : tile edge length in texels
// width   : width (== height assumed) of the flat texture
//
// Launch: 1D grid, one block per seed; dynamic shared memory of at
// least n * sizeof(float4) bytes.
//----------------------------------------------------------------------
__global__ void scan_workefficient_2(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
{
    // Dynamically allocated shared memory for the scan.
    extern __shared__ float4 temp[];
    const float4 zero = make_float4(0.f, 0.f, 0.f, 0.f);

    int numThreads = blockDim.x * blockDim.y;
    int thid = threadIdx.x + blockDim.x * threadIdx.y;

    // Each thread owns two slots; clear them so out-of-bounds or
    // non-matching texels contribute nothing to the reduction.
    temp[2*thid] = zero;
    temp[2*thid+1] = zero;

    int blockId = blockIdx.x;
    int4& seed = *(seeds + blockId);

    // Tile origin: seed position minus half the edge.
    int edge2 = edge >> 1;
    int xorig = seed.x - edge2;
    int yorig = seed.y - edge2;

    int xid  = xorig + threadIdx.x;               // column (two rows per thread)
    int yid1 = yorig + threadIdx.y;               // top-half row
    int yid2 = yorig + edge - 1 - threadIdx.y;    // mirrored bottom-half row

    // Clip against the texture bounds.
    int WW = width;   // flat texture width (argument)
    int HH = WW;      // texture assumed square -- TODO pass height explicitly
    int flag  = (xid  >= 0 && xid  < WW);
    int flag1 = (yid1 >= 0 && yid1 < HH);
    int flag2 = (yid2 >= 0 && yid2 < HH);

    int arrayid1 = xid + yid1 * WW;
    int arrayid2 = xid + yid2 * WW;

    __syncthreads();

    // Load the two texels; only those carrying the seed's label are kept.
    // Position is replaced by the texel coordinate and .w becomes a count
    // of 1, so the up-sweep yields (sum_x, sum_y, -, count).
    float4 f;
    if (flag && flag1) {
        f = g_idata[arrayid1];
        if (int(f.w) == seed.w)
        {
            f.x = xid;
            f.y = yid1;
            f.w = 1.f;
            temp[2*thid] = f;
        }
    }
    if (flag && flag2) {
        f = g_idata[arrayid2];
        if (int(f.w) == seed.w)
        {
            f.x = xid;
            f.y = yid2;
            f.w = 1.f;
            temp[2*thid+1] = f;
        }
    }

    // Work-efficient up-sweep: build the sum in place up the tree.
    int offset = 1;
    for (int d = n>>1; d > 0; d >>= 1)
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            temp[bi].x += temp[ai].x;
            temp[bi].y += temp[ai].y;
            temp[bi].z += temp[ai].z;
            temp[bi].w += temp[ai].w;
        }
        offset <<= 1;
    }

    // One thread publishes the block result.
    __syncthreads();
    if (thid == (numThreads-1)) {
        float nbs = temp[n-1].w;    // number of matching texels
        // Guard BEFORE dividing: the original computed 1/(nbs*width) first,
        // producing inf/NaN results whenever the tile had no matches.
        if (nbs == 0.f) nbs = 1.f;
        float nbs1 = 1.f/(nbs*width);
        sum[blockId] = make_float4(temp[n-1].x*nbs1, temp[n-1].y*nbs1, 0.f, nbs);
    }
}
//----------------------------------------------------------------------
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
//----------------------------------------------------------------------
// Variant of scan_workefficient_2: one float4 per thread, seed broadcast
// through shared memory (single global read), and a scalar up-sweep over
// a structure-of-arrays view of the shared tile (component planes at
// offsets 0, N, 2N, 3N with N = numThreads).
//
// NOTE(review): the global load reads g_idata[thid] rather than the
// seed-centred tile, and the scalar up-sweep assumes SoA layout while
// the load stores AoS float4s -- this kernel looks experimental (the
// original comments say "Something wrong with the results"); confirm
// intent before relying on its output.
//
// Requires dynamic shared memory for at least 2*numThreads + 2 float4
// slots (slot numThreads*2+1 carries the broadcast seed).
//----------------------------------------------------------------------
__global__ void scan_workefficient_3(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
{
    // Dynamically allocated shared memory for the scan.
    extern __shared__ float4 temp[];
    float* tempf = (float*) temp;   // scalar view used by the up-sweep

    int numThreads = blockDim.x * blockDim.y;
    int thid = threadIdx.x + blockDim.x * threadIdx.y;
    int blockId = blockIdx.x;

    // One thread fetches the seed and broadcasts it via shared memory so
    // the block issues a single global read instead of numThreads reads.
    int4 seed;
    if (thid == 0) {
        seed = *(seeds+blockId);
        temp[numThreads*2+1] = make_float4(seed.x,seed.y,seed.z,seed.w);
    }
    __syncthreads();
    seed = MAKE_INT4(temp[numThreads*2+1]);

    // Tile origin: seed position minus half the edge.
    int edge2 = edge >> 1;
    int xorig = seed.x - edge2;
    int yorig = seed.y - edge2;

    int WW = width;   // flat texture width (argument)
    int HH = WW;      // texture assumed square -- TODO pass height explicitly
    int xid = xorig + threadIdx.x;
    int flag1;

    //--------------------
    for (int j=0; j < 1; j++) {   // single row pass (loop kept for future tiles)
        __syncthreads();
        // In-bounds test for this thread's texel.
        flag1 = 1;
        if (xid < 0 || xid >= WW) {
            flag1 = 0;
        }
        // NOTE(review): reads the first numThreads texels of g_idata, not
        // the seed tile -- kept as-is pending confirmation of intent.
        temp[thid+j*numThreads] = g_idata[j*numThreads+thid];
        temp[thid+j*numThreads].w = 1.f;   // each texel counts as 1
        int yid1 = yorig + threadIdx.y + j*numThreads;
        if (yid1 < 0 || yid1 >= HH) flag1 = 0;
        __syncthreads();
        if (flag1 == 0) {
            // Zero out-of-bounds contributions (incoherent per the author).
            temp[thid] = make_float4(0.f,0.f,0.f,0.f);
        }
    }

    //--------------------
    // Work-efficient up-sweep on the scalar view.
    int offset = 1;
    for (int d = n>>1; d > 0; d >>= 1)
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            tempf[bi] += tempf[ai];
            tempf[bi + numThreads] += tempf[ai + numThreads];
            // Precedence fix: '+' binds tighter than '<<', so the original
            // 'bi+numThreads << 1' indexed (bi+numThreads)*2, and
            // 'bi+numThreads << 1 + numThreads' shifted by (1+numThreads),
            // reading/writing far outside the shared array.
            tempf[bi + (numThreads << 1)] += tempf[ai + (numThreads << 1)];
            tempf[bi + (numThreads << 1) + numThreads] += tempf[ai + (numThreads << 1) + numThreads];
        }
        offset <<= 1;
    }

    // One thread publishes the block result.
    __syncthreads();
    if (thid == (numThreads-1)) {
        float nbs = temp[n-1].w;    // count of contributing texels
        // Guard BEFORE dividing (the original divided by zero first).
        if (nbs == 0.f) nbs = 1.f;
        float nbs1 = 1.f/(nbs*width);
        sum[blockId] = make_float4(temp[n-1].x*nbs1, temp[n-1].y*nbs1, 0.f, nbs);
    }
}
//----------------------------------------------------------------------
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
//----------------------------------------------------------------------
// Micro-benchmark stub: every thread writes one zeroed float4 into the
// dynamically allocated shared array and exits.  Used to probe whether
// the bare shared-memory store alone produces incoherent accesses; the
// g_idata/sum/seeds/n/edge/width arguments are accepted but unused.
//----------------------------------------------------------------------
__global__ void scan_test_incoherent(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
{
    // Dynamically allocated shared memory (sized by the launch).
    extern __shared__ float4 temp[];
    const int lane = threadIdx.x + blockDim.x * threadIdx.y;
    temp[lane] = make_float4(0., 0., 0., 0.);
}
//----------------------------------------------------------------------
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// Use more threads by reading floats instead of float4
//----------------------------------------------------------------------
// Variant of scan_workefficient_2 with one float4 per thread: each
// thread loads a single texel of the (blockDim.x x blockDim.y) tile
// centred on this block's seed, then a work-efficient up-sweep over the
// shared float4 array accumulates (sum_x, sum_y, -, count).  Unlike
// variant 2, no label matching is done here: every in-bounds texel
// counts as 1.
//
// sum[blockIdx.x] receives (mean_x/width, mean_y/width, 0, count).
// n must equal blockDim.x * blockDim.y and be a power of two
// -- TODO confirm with caller.  Dynamic shared memory: n float4 slots.
//----------------------------------------------------------------------
__global__ void scan_workefficient_4(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
{
    // Dynamically allocated shared memory for the scan.
    extern __shared__ float4 temp[];

    int numThreads = blockDim.x * blockDim.y;
    int thid = threadIdx.x + blockDim.x * threadIdx.y;
    int blockId = blockIdx.x;
    int4& seed = *(seeds+blockId);

    // Tile origin: seed position minus half the edge.
    int edge2 = edge >> 1;
    int xorig = seed.x - edge2;
    int yorig = seed.y - edge2;

    int WW = width;   // flat texture width (argument)
    int HH = WW;      // texture assumed square -- TODO pass height explicitly
    int xid  = xorig + threadIdx.x;
    int yid1 = yorig + threadIdx.y;
    int arrayid1 = xid + yid1 * WW;

    // In-bounds test for this thread's texel.
    int flag1 = (xid >= 0 && xid < WW && yid1 >= 0 && yid1 < HH);

    __syncthreads();

    // Guarded load: the original read g_idata[arrayid1] unconditionally
    // and zeroed the slot afterwards, performing out-of-bounds global
    // reads for tiles overlapping the texture border.
    if (flag1) {
        temp[thid] = g_idata[arrayid1];
        temp[thid].w = 1.f;   // each in-bounds texel counts as 1
    } else {
        temp[thid] = make_float4(0.f,0.f,0.f,0.f);
    }
    __syncthreads();

    //--------------------
    // Work-efficient up-sweep: build the sum in place up the tree.
    int offset = 1;
    for (int d = n>>1; d > 0; d >>= 1)
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            temp[bi].x += temp[ai].x;
            temp[bi].y += temp[ai].y;
            temp[bi].z += temp[ai].z;
            temp[bi].w += temp[ai].w;
        }
        offset <<= 1;
    }

    // One thread publishes the block result.
    __syncthreads();
    if (thid == (numThreads-1)) {
        float nbs = temp[n-1].w;    // count of contributing texels
        // Guard BEFORE dividing (the original divided by zero first).
        if (nbs == 0.f) nbs = 1.f;
        float nbs1 = 1.f/(nbs*width);
        sum[blockId] = make_float4(temp[n-1].x*nbs1, temp[n-1].y*nbs1, 0.f, nbs);
    }
}
//----------------------------------------------------------------------
__global__ void scan_workefficient_5(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// Use more threads by reading floats instead of float4
//
// Per block: load an edge x edge tile of float4 texels centered on
// seeds[blockIdx.x] (coalesced float reads), zero out texels whose .w id does
// not match the seed, then reduce the tile in place in shared memory and write
// one averaged centroid float4 to sum[blockIdx.x].
// NOTE(review): the multi-pass reduction below is HARDCODED for edge==16 with
// 64 threads (128-float chunks, 3 outer passes) -- confirm the launch config.
// Shared memory must hold edge*edge float4 plus one broadcast slot (last_share).
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
float* tempf = (float*) temp;
float* sumf = (float*) sum;  // float view of the output; used by the #if 0 debug dumps below
// blockDim.x == edge (will generalize later)
int numThreads = blockDim.x * blockDim.y;
// if (blockIdx.x > 1000) return;
#if 1
// Broadcast the seed through shared memory so only thread 0 reads global memory.
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
//int last_share = 0;
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);  // +0.1 so float->int truncation is safe
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
#if 0
//int4 seed = make_int4(8,8,0,311); // TEST SEED
int4 seed = seeds[blockIdx.x];
#endif
float* g_idata_f = (float*) g_idata;
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge2; // one thread per float (edge float4's)
int yorig = y - edge2; // edge rows
// align xorig such that xorig is a multiple of 4 so float reads coalesce
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
//else return;
int flag1;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
int xid = 4*xorig + threadIdx.x; // measured in floats
//--------------------
// one iteration per row in the square tile
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
__syncthreads();
flag1 = 1;
// out-of-texture columns/rows load zero instead of reading out of bounds
if (xid < 0 || xid >= (WW*4)) flag1 = 0;
int yid1 = yorig + j;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
int arrayid1 = xid + yid1*WW*4; // WW*4 floats
tempf[j*4*edge+threadIdx.x] = 0.;
// crashes without this test
if (flag1 != 0) {
tempf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
}
__syncthreads();
} // end of for loop
//return;
__syncthreads();
float widthi = 1./width;
// Replace matching texels by their normalized coordinates (count in .w),
// zero the rest; only the first `edge` threads touch the float4 view.
for (int j=0; j < edge; j++) {
__syncthreads();
if (threadIdx.x < edge) {
float f = temp[j*edge+threadIdx.x].w + 0.1; // so that int() works
if (int(f) != seed.w) {
temp[j*edge+threadIdx.x] = make_float4(0.,0.,0.,0.);
} else {
temp[j*edge+threadIdx.x].x = (xorig+threadIdx.x) * widthi;
temp[j*edge+threadIdx.x].y = (yorig+j) * widthi;
temp[j*edge+threadIdx.x].w = 1.;
}
}
}
__syncthreads();
#if 0
for (int j=0; j < edge; j++) {
sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
}
return;
#endif
//--------------------
// For the 16x16 case (hardcoded), the first passes with 64 threads can only
// handle 128 floats at a time; sweep the tile chunk by chunk, halving the
// number of chunks each level as the stride doubles.
int offset = 1;
//====
int tid, j, ai, bi;
int mx = 8;
tid = threadIdx.x >> 2;
j = threadIdx.x - (tid << 2);
for (int outer=0; outer < 3; outer++) { // HARDCODED
for (int k=0; k < mx; k++) {
__syncthreads();
int off = k * 128 * (1 << outer); // HARDCODED
ai = offset*(2*tid+1)-1;
bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j;
bi = (bi << 2) + j;
tempf[bi+off] += tempf[ai+off];
}
// BUGFIX: was `mx >> 1;` (a no-op); mx stayed 8, so later passes indexed
// off = k*128*(1<<outer) past the edge*edge*4-float tile (OOB shared access).
mx >>= 1;
offset <<= 1;
}
//====
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
}
return;
#endif
#
int sz = edge*edge / 2; // (128 for 64 threads)
#if 1
// build the sum in place up the tree (finish the up-sweep within one chunk)
for (int d = sz>>1; d > 0; d >>= 1) {
__syncthreads();
if (threadIdx.x < d)
{
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j;
bi = (bi << 2) + j;
tempf[bi] += tempf[ai];
}
offset <<= 1;
}
#endif
#
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
}
//return;
#endif
// write results to global memory: totals end up in the last float4 of the tile
__syncthreads();
if (threadIdx.x == (numThreads-1)) {
int el = edge*edge-1;
float nbs = temp[el].w;      // number of matching texels
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;   // empty tile: avoid using the inf reciprocal
sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
__global__ void scan_workefficient_6(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// Use more threads by reading floats instead of float4
// Remove bank conflicts (decrease serialized_warps) via the TMPF() padded-index macro
//
// Per block: load an edge x edge tile of float4 texels centered on
// seeds[blockIdx.x], zero texels whose .w id does not match the seed, then
// reduce in shared memory and write one averaged centroid to sum[blockIdx.x].
// NOTE(review): reduction passes HARDCODED for edge==16 / 64 threads -- confirm launch config.
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
float* tempf = (float*) temp;  // raw float view; TMPF() adds bank-conflict padding
float* sumf = (float*) sum;    // float view of the output; used by the #if 0 debug dumps
// blockDim.x == edge (will generalize later)
int numThreads = blockDim.x * blockDim.y;
// if (blockIdx.x > 1000) return;
#if 1
// Broadcast the seed through shared memory so only thread 0 reads global memory.
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
//int last_share = 0;
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);  // +0.1 so float->int truncation is safe
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
#if 0
//int4 seed = make_int4(8,8,0,311); // TEST SEED
int4 seed = seeds[blockIdx.x];
#endif
float* g_idata_f = (float*) g_idata;
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge2; // one thread per float (edge float4's)
int yorig = y - edge2; // edge rows
// align xorig to a multiple of 4 so float reads coalesce
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
//else return;
int flag1;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
int xid = 4*xorig + threadIdx.x; // measured in floats
//--------------------
// one iteration per row in the square tile
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
__syncthreads();
flag1 = 1;
// out-of-texture columns/rows load zero instead of reading out of bounds
if (xid < 0 || xid >= (WW*4)) flag1 = 0;
int yid1 = yorig + j;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
int arrayid1 = xid + yid1*WW*4; // WW*4 floats
TMPF(j*4*edge+threadIdx.x) = 0.;
// crashes without this test
if (flag1 != 0) {
TMPF(j*4*edge+threadIdx.x) = g_idata_f[arrayid1];
}
__syncthreads();
} // end of for loop
//return;
__syncthreads();
float widthi = 1./width;
// Replace matching texels by normalized coordinates (count in .w), zero the rest.
// NOTE(review): this loop indexes temp[] directly, bypassing the TMPF padding
// used for the writes above -- confirm the two layouts agree.
for (int j=0; j < edge; j++) {
__syncthreads();
if (threadIdx.x < edge) {
float f = temp[j*edge+threadIdx.x].w + 0.1; // so that int() works
if (int(f) != seed.w) {
temp[j*edge+threadIdx.x] = make_float4(0.,0.,0.,0.);
} else {
temp[j*edge+threadIdx.x].x = (xorig+threadIdx.x) * widthi;
temp[j*edge+threadIdx.x].y = (yorig+j) * widthi;
temp[j*edge+threadIdx.x].w = 1.;
}
}
}
__syncthreads();
#if 0
for (int j=0; j < edge; j++) {
sumf[j*4*edge+threadIdx.x] = TMPF(j*4*edge+threadIdx.x);
}
return;
#endif
//--------------------
// For the 16x16 case (hardcoded), 64 threads handle 128 floats per pass;
// sweep the tile chunk by chunk, halving the chunk count as the stride doubles.
int offset = 1;
//====
int mx = 8;
for (int outer=0; outer < 3; outer++) { // HARDCODED
for (int k=0; k < mx; k++) {
__syncthreads();
int off = k * 128 * (1 << outer); // HARDCODED
int tid = threadIdx.x >> 2;
int j = threadIdx.x - (tid << 2);
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j;
bi = (bi << 2) + j;
TMPF(bi+off) += TMPF(ai+off);
}
// BUGFIX: was `mx >> 1;` (a no-op); mx stayed 8, so later passes indexed
// off = k*128*(1<<outer) past the edge*edge*4-float tile (OOB shared access).
mx >>= 1;
offset <<= 1;
}
//====
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*4*edge+threadIdx.x] = TMPF(j*4*edge+threadIdx.x);
}
return;
#endif
int sz = edge*edge / 2; // (128 for 64 threads)
#if 1
// build the sum in place up the tree (finish the up-sweep within one chunk)
for (int d = sz>>1; d > 0; d >>= 1) {
__syncthreads();
if (threadIdx.x < d)
{
int tid = threadIdx.x >> 2; // thread id divided by 4
int j = threadIdx.x - (tid << 2); // 0,1,2,3
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j;
bi = (bi << 2) + j;
TMPF(bi) += TMPF(ai);
}
offset <<= 1;
}
#endif
#
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*4*edge+threadIdx.x] = TMPF(j*4*edge+threadIdx.x);
}
return;
#endif
// write results to global memory: totals end up in the last float4 of the tile
__syncthreads();
if (threadIdx.x == (numThreads-1)) {
int el = edge*edge-1;
float nbs = temp[el].w;      // number of matching texels
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;   // empty tile: avoid using the inf reciprocal
sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
__global__ void scan_workefficient_7(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// Use more threads by reading floats instead of float4
//
// Per block: load an edge x edge float4 tile centered on seeds[blockIdx.x]
// using a 2D thread block (threadIdx.y covers 4 rows per loop pass), zero
// texels whose .w does not match the seed id, then reduce the tile in shared
// memory and write one averaged float4 to sum[blockIdx.x].
// NOTE(review): several constants below (512-float halves, k<2, sz=512, the
// *16 row strides) assume edge==16 with a 64x4 = 256-thread block -- confirm
// against the launch site.
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
float* tempf = (float*) temp;
float* sumf = (float*) sum;  // float view of the output; used only by the #if 0 debug dumps
// blockDim.x == edge (will generalize later)
int numThreads = blockDim.x * blockDim.y;
//if (blockIdx.x != 11) return; // block 13 has serial errors
#if 1
// Broadcast the seed through shared memory so only thread 0 reads global memory.
// SOMETHING NOT WORKING
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
//int last_share = 0;
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
#if 0
//int4 seed = make_int4(8,8,0,311); // TEST SEED
int4 seed = seeds[blockIdx.x];
#endif
float* g_idata_f = (float*) g_idata;
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge2; // one thread per float (edge float4's)
int yorig = y - edge2; // edge rows
// align xorig such that xorig is a multiple of 2 (speedup is evident)
//xorig = (xorig >> 1) << 1; // xorig is divisble by 2^1
// Align xorig to a multiple of 4 so the per-float reads coalesce.
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
//else return;
int flag1;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
//--------------------
// one iteration per row in the square tile
// j steps by 4 because threadIdx.y supplies 4 consecutive rows per pass.
for (int j=0; j < edge; j+=4) { // the loop added 2 registers (could be unrolled)
__syncthreads();
//
int subtid = j >> 4; // 0, 1, ..., numThreads/4   (currently unused)
int subrow = j - subtid;                        // (currently unused)
//
flag1 = 1;
//
// // need for each of the strings separately
int xid = 4*xorig + threadIdx.x; // measured in floats
if (xid < 0 || xid >= (WW*4)) flag1 = 0;
//
int yid1 = yorig + j + threadIdx.y;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
//
int arrayid1 = xid + yid1*WW*4; // WW*4 floats
//
// // I MUST ALSO CHECK THE SEED VALUE
//
//
int jj = j+threadIdx.y;
tempf[jj*4*edge+threadIdx.x] = 0.;
//
// // crashes without this test (out-of-texture texels must load zero)
if (flag1 != 0) {
tempf[jj*4*edge+threadIdx.x] = g_idata_f[arrayid1];
//tempf[jj*4*edge+threadIdx.x] = 0.; // very low overhead
}
//
//
// //sumf[jj*4*edge+threadIdx.x] = g_idata_f[arrayid1];
// //sumf[jj*4*edge+threadIdx.x] = tempf[jj*4*edge+threadIdx.x];
// //sumf[jj*4*edge+threadIdx.x] = tempf[jj*4*edge+threadIdx.x];
// //sumf[jj*4*edge+threadIdx.x] = xorig;
//
// //if (j == 0) {
// //sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
// //return;
// //}
//
__syncthreads();
} // end of for loop
//
//
__syncthreads();
//
float widthi = 1./width;  // currently unused: the coordinate rewrite below is commented out
int thread_id = threadIdx.x + blockDim.x * threadIdx.y;
// use float4
// not the problem
#if 0
// 256 threads
// NOTE(review): this disabled block is malformed (`int (int j=0; ...)` should
// be `for (...)`) -- it will not compile if re-enabled.
int tid = thread_id >> 2;
int j4 = thread_id - (tid << 2);
int (int j=0; j < 4; j++) {
__syncthreads();
if (j4 == 3) {
float f = tempf[j*256 + tid + j4] + 0.1;
if (int(f) != seed.w) {
//tempf[j*256+thread_id] = 0.;
temp[j*64+threadIdx.x] = make_float4(0.,0.,0.,0.);
}
}
}
#endif
// use float4
// not the problem
// Zero out texels whose .w id does not match the seed (the coordinate rewrite
// done by scan_workefficient_5/6 is deferred -- see commented-out branch).
#if 1
for (int j=0; j < edge; j++) { // takes 1.7 ms
__syncthreads();
if (threadIdx.x < edge && threadIdx.y == 0) {
int tid = threadIdx.x;
float f = temp[j*16+tid].w + 0.1; // so that int() works
if (int(f) != seed.w) {
temp[j*16+tid] = make_float4(0.,0.,0.,0.); // cause of serialization
} // else {
//float4 g;
// Will do this later
//g.x = (xorig+threadIdx.x) * widthi;
//g.y = (yorig+j) * widthi;
//g.z = 0.;
//g.w = 1.;
//temp[j*edge+threadIdx.x] = g;
// }
}
}
#endif
// use float
#if 0
for (int j=0; j < edge; j++) { // takes 1.7 ms
__syncthreads();
if (threadIdx.x < edge && threadIdx.y == 0) {
int tid = thread_id >> 2;
int j = thread_id - (tid << 2);
float f = tempf[4*j*16+tid+j] + 0.1; // so that int() works
if (int(f) != seed.w) {
tempf[4*j*16+tid] = 0.;
tempf[4*j*16+tid+1] = 0.;
tempf[4*j*16+tid+2] = 0.;
tempf[4*j*16+tid+3] = 0.;
} else {
float4 g;
// Will do this later
//g.x = (xorig+threadIdx.x) * widthi;
//g.y = (yorig+j) * widthi;
//g.z = 0.;
//g.w = 1.;
//temp[jj*edge+threadIdx.x] = g;
}
}
}
#endif
__syncthreads();
//return;
#if 0
for (int j=0; j < 4; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*256+thread_id] = tempf[j*256+thread_id];
}
return;
#endif
// //--------------------
//
//// xorig - edge/2, xorig + edge/2 - 1
//
//// For the 16x16 case (hardcoded), the first pass with 64 threads can
//// only handle 1/2 the domain (1024 floats = 16x16x4). The for loop that
//// follows had a thread handle two floats at a time, so can only handl
//// 1/2 the domain on each pass
//
//// manually treat each half of the domain
//
int offset = 1;
// //====
//
int tid, j4, ai, bi;
// for (int j=0; j < 4; j++) { // the loop added 2 registers (could be unrolled)
//sumf[j*256+thread_id] = tempf[j*256+thread_id];
//}
//return;
// First reduction level: 256 threads sweep the two 512-float halves of the
// tile at stride 1; offset is doubled once AFTER both halves (same level).
tid = thread_id >> 2;
j4 = thread_id - (tid << 2);
for (int k=0; k < 2; k++) {
__syncthreads();
int off = k * 512;
ai = offset*(2*tid+1)-1;
bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j4;
bi = (bi << 2) + j4;
tempf[bi+off] += tempf[ai+off];
}
offset <<= 1;
//====
__syncthreads();
#if 0
for (int j=0; j < 4; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*256+thread_id] = tempf[j*256+thread_id];
}
return;
#endif
//
//
int sz = 512; // * edge*edge; // (512 for 256 threads)
//return;
//
#if 1
// build the sum in place up the tree (remaining levels; 4 float lanes per float4)
for (int d = sz>>3; d > 0; d >>= 1) {
__syncthreads();
if (thread_id < (d*4)) // 4 subthreads per thread: 64*4 = 256
{
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j4;
bi = (bi << 2) + j4;
tempf[bi] += tempf[ai];
}
offset <<= 1;
//if (d == 0) break;
}
#endif
#if 0
for (int j=0; j < 4; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*256+thread_id] = tempf[j*256+thread_id];
}
return;
#endif
// Something wrong with the results
// write results to global memory: totals end up in the last float4 of the tile.
// NOTE(review): unlike _5/_6 this guard is threadIdx.x == blockDim.x-1, so one
// thread per threadIdx.y row passes it -- presumably redundant identical
// writes; verify that is intended.
__syncthreads();
if (threadIdx.x == (blockDim.x-1)) {
int el = edge*edge-1;
//int el = 0;
float nbs = temp[el].w;
//float nbs1 = 1./(nbs*width);
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;
//sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, width, nbs);
sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
#endif // #ifndef _SCAN_WORKEFFICIENT_KERNEL_H_
| scan_efficient.cu |
#ifndef _SCAN_WORKEFFICIENT_KERNEL_H_
#define _SCAN_WORKEFFICIENT_KERNEL_H_
#
#include "local_macros.h"
#define TIDX (__mul24(blockIdx.x,blockDim.x) + threadIdx.x)
#define TIDY (__mul24(blockIdx.y,blockDim.y) + threadIdx.y)
#define TWIDTH (__mul24(gridDim.x,blockDim.x))
#define THEIGHT (__mul24(gridDim.y,blockDim.y))
#define ArrayID (TIDY*TWIDTH+TIDX)
#define MAKE_FLOAT4(arg) make_float4((arg), (arg), (arg), (arg))
#define MAKE_INT4(arg) make_int4((arg).x, (arg).y, (arg).z, (arg).w);
// Written by NVidia
// Modified by Gordon Erlebacher, Feb. 21, 2008
//----------------------------------------------------------------------
__global__ void scan_workefficient_2(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// Per block: gather an edge x edge float4 tile centered on seeds[blockIdx.x]
// (two texels per thread, rows walked from both ends), keep only texels whose
// .w matches the seed id (storing their x/y coordinates with count 1 in .w),
// reduce the n-element tile in shared memory, and write the averaged centroid
// to sum[blockIdx.x].  Shared memory must hold n float4 elements.
{
// Dynamically allocated shared memory for scan kernels
#if 1
extern __shared__ float4 temp[];
float4 zero = make_float4(0.,0.,0.,0.);
//if (blockIdx.x != 2) return;
int numThreads = blockDim.x * blockDim.y;
int thid = threadIdx.x + blockDim.x * threadIdx.y;
// every slot defaults to zero so out-of-bounds / non-matching texels add nothing
temp[2*thid] = zero;
temp[2*thid+1] = zero;
int blockId = blockIdx.x;
int4& seed = *(seeds+blockId);
// compute 2D flat texture coordinate from 3D seed coordinate
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 1;
int xorig = x - edge2;
int yorig = y - edge2;
int xid = xorig + threadIdx.x; // 2 elements per thread
int yid1 = yorig + threadIdx.y;              // row from the top of the tile
int yid2 = yorig + edge - 1 - threadIdx.y;   // mirrored row from the bottom
#endif
int flag = 1;
int flag1 = 1;
int flag2 = 1;
int WW = width; // array width (argument)
if (xid < 0 || xid >= WW) flag = 0;
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
if (yid2 < 0 || yid2 >= HH) flag2 = 0;
int arrayid1 = xid + yid1 * WW;
int arrayid2 = xid + yid2 * WW;
__syncthreads();
// the data can be in arbitrary order in the shared array
float4 f;
// matching texels contribute their coordinates; .w becomes a count of 1
if (flag == 1 && flag1 == 1) {
f = g_idata[arrayid1];
if (int(f.w) == seed.w)
{
f.x = xid;
f.y = yid1;
f.w = 1.;
temp[2*thid] = f;
}
}
if (flag == 1 && flag2 == 1) {
f = g_idata[arrayid2];
if (int(f.w) == seed.w)
{
f.x = xid;
f.y = yid2;
f.w = 1.;
temp[2*thid+1] = f;
}
}
#if 0
__syncthreads();
g_idata[2*thid] = temp[2*thid];
g_idata[2*thid+1] = temp[2*thid+1];
return;
#endif
int offset = 1;
#if 1
// xorig - edge/2, xorig + edge/2 - 1
#if 1
#if 1
// build the sum in place up the tree
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi].x += temp[ai].x;
temp[bi].y += temp[ai].y;
temp[bi].z += temp[ai].z;
temp[bi].w += temp[ai].w;
}
offset <<= 1;
}
#endif
#
// write results to global memory: the grand totals are in temp[n-1]
__syncthreads();
if (thid == (numThreads-1)) {
float nbs = temp[n-1].w;     // number of matching texels accumulated in .w
// BUGFIX: guard BEFORE the divide. nbs1 was computed first, so an empty tile
// (nbs == 0) produced an inf reciprocal and a NaN centroid (0 * inf).
if (nbs == 0) nbs = 1.;
float nbs1 = 1./(nbs*width);
sum[blockId] = make_float4(temp[n-1].x*nbs1, temp[n-1].y*nbs1, 0., nbs); //, nbs);
}
#endif
#endif
}
//----------------------------------------------------------------------
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// Per block: broadcast seeds[blockIdx.x] through shared memory, load one
// float4 per thread into the shared tile, zero out-of-bounds slots, reduce in
// place, and write the averaged result to sum[blockIdx.x].
// NOTE(review): the float-view reduction treats tempf as four contiguous
// numThreads-long lanes, which does not match the interleaved float4 layout
// written above it -- this kernel looks experimental; verify before reuse.
__global__ void scan_workefficient_3(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
float* tempf = (float*) temp;
//if (blockIdx.x != 2) return;
int numThreads = blockDim.x * blockDim.y;
int thid = threadIdx.x + blockDim.x * threadIdx.y;
// get data from global memory (should be coalesced)
int blockId = blockIdx.x;
// Thread 0 fetches the seed and broadcasts it via shared memory.
// NOTE(review): temp[numThreads*2+1] requires 2*numThreads+2 float4 slots of
// dynamic shared memory -- confirm against the launch configuration.
int4 seed;
if (thid == 0) {
seed = *(seeds+blockId);
temp[numThreads*2+1] = make_float4(seed.x,seed.y,seed.z,seed.w);
}
__syncthreads();
seed = MAKE_INT4(temp[numThreads*2+1]);
// compute 2D flat texture coordinate from 3D seed coordinate
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 1;
int xorig = x - edge2;
int yorig = y - edge2;
int flag1;
int xid = xorig + threadIdx.x; // 2 elements per thread
int WW = width; // array width (argument)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
//--------------------
for (int j=0; j < 1; j++) { // the loop added 2 registers (could be unrolled)
__syncthreads();
flag1 = 1;
// need for each of the strings separately
if (xid < 0 || xid >= WW) {
flag1 = 0;
}
temp[thid+j*numThreads] = g_idata[j*numThreads+thid];
temp[thid+j*numThreads].w = 1.;
int yid1 = yorig + threadIdx.y + j*numThreads;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
int arrayid1 = xid + yid1 * WW; // intended texture address (currently unused)
__syncthreads();
// the data can be in arbitrary order in the shared array
// CREATES uncoalesced loads (HOW POSSIBLE?)
// 1.1 ms if if statement is commented out
// 1.8 ms if if statement is not commented out
//return;
if (flag1 == 0) {
// creates incoherent loads
temp[thid] = make_float4(0.,0.,0.,0.);
}
} // end of for loop
//return;
//--------------------
int offset = 1;
// xorig - edge/2, xorig + edge/2 - 1
// build the sum in place up the tree
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
#if 1
// BUGFIX: `<<` binds looser than `+`, so the original
//   tempf[bi+numThreads << 1]                parsed as tempf[(bi+numThreads) << 1]
//   tempf[bi+numThreads << 1 + numThreads]   parsed as tempf[(bi+numThreads) << (1+numThreads)]
// -- both wildly out-of-range shared indices. The intended lanes are
// bi + 2*numThreads and bi + 3*numThreads.
tempf[bi] += tempf[ai];
tempf[bi + numThreads] += tempf[ai + numThreads];
tempf[bi + 2*numThreads] += tempf[ai + 2*numThreads];
tempf[bi + 3*numThreads] += tempf[ai + 3*numThreads];
#endif
#
#if 0
temp[bi].x += temp[ai].x;
temp[bi].y += temp[ai].y;
temp[bi].z += temp[ai].z;
temp[bi].w += temp[ai].w;
#endif
}
offset <<= 1;
}
// write results to global memory: totals end up in temp[n-1]
__syncthreads();
if (thid == (numThreads-1)) {
float nbs = temp[n-1].w;     // number of contributing texels
// BUGFIX: guard BEFORE the divide (was after, giving inf -> NaN output)
if (nbs == 0) nbs = 1.;
float nbs1 = 1./(nbs*width);
sum[blockId] = make_float4(temp[n-1].x*nbs1, temp[n-1].y*nbs1, 0., nbs); //, nbs);
}
}
//----------------------------------------------------------------------
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
__global__ void scan_test_incoherent(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
//if (blockIdx.x != 2) return;
//float* g_idata_f = (float*) g_idata;
int numThreads = blockDim.x * blockDim.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int thid = threadIdx.x + blockDim.x * threadIdx.y;
temp[thid] = make_float4(0.,0.,0.,0.);
return;
}
//----------------------------------------------------------------------
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// Use more threads by reading floats instead of float4
__global__ void scan_workefficient_4(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
float* tempf = (float*) temp;
//if (blockIdx.x != 2) return;
float* g_idata_f = (float*) g_idata;
//float f1 = g_idata_f[0];
// blockDim.x == edge (will generalize later)
int numThreads = blockDim.x * blockDim.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int thid = threadIdx.x + blockDim.x * threadIdx.y;
// get data from global memory (should be coalesced)
int thid2 = thid<<1;
int blockId = blockIdx.x;
int4& seed = *(seeds+blockId);
// compute 2D flat texture coordinate from 3D seed coordinate
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 1;
int xorig = x - edge2;
int yorig = y - edge2;
int flag1;
int xid = xorig + threadIdx.x; // 2 elements per thread
int WW = width; // array width (argument)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
//--------------------
int j = 0;
//for (int j=0; j < 1; j++) { //} the loop added 2 registers (could be unrolled)
__syncthreads();
flag1 = 1;
// need for each of the strings separately
if (xid < 0 || xid >= WW) {
flag1 = 0;
}
int yid1 = yorig + threadIdx.y + j*numThreads;
int arrayid1 = xid + yid1 * WW;
// 16 x 16 threads, tile: 16x16 float4 ==> 64 x 16 floats
// break threads: 32 x 8
int tid = threadIdx.x + blockDim.x * threadIdx.y;
// tid = 0 ==. array[0,0] // column-major (Fortran)
// tid = 1 ==. array[1,0]
// tid = 15 ==. array[15,0]
// tid = 16 ==. array[16,0]
// tid = 17 ==. array[17,0]
int warp_base = tid >> 5; // divide by 32 // array row [0,...,7]
int thread_in_warp = tid - (warp_base << 5); // [0,...,31]
// warp 0: array[0,0] --> array[31,0]
// warp 1: array[32,0] --> array[63,0]
// warp 2: array[0,1] --> array[31,1]
// warp 3: array[32,1] --> array[63,1]
// warp 4: array[0,2] --> array[31,2]
// warp 5: array[32,2] --> array[63,2]
// warp 6: array[0,3] --> array[31,3]
// warp 7: array[32,3] --> array[63,3] // 4th row
// There are 16 rows in the array. Create a loop:
// for (int i=0; i < 4; i++) {
// warp 0: array[0,i*4] --> array[31,i*4]
// warp 7: array[32,i*4+3] --> array[63,i*4]
// Eventually generalize to more arrays
// two warps per row
// arrayid = thread_in_warp + warp_base * WW;
// tempf[warp_base] = g_idata[array_id] // floats (64 per row)
// temp[thid+numThreads] = g_idata[array_id]
// I could be exceeding memory bounds. So how to read coalesced without
temp[thid] = g_idata[arrayid1];
temp[thid].w = 1.;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
__syncthreads();
// the data can be in arbitrary order in the shared array
// CREATES uncoalesced loads (HOW POSSIBLE?)
// 1.1 ms if if statement is commented out
// 1.8 ms if if statement is not commented out
//return;
#if 1
if (flag1 == 0) {
// creates incoherent loads
temp[thid] = make_float4(0.,0.,0.,0.);
}
#endif
//} // end of for loop
//--------------------
//return;
int offset = 1;
// xorig - edge/2, xorig + edge/2 - 1
// build the sum in place up the tree
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi].x += temp[ai].x;
temp[bi].y += temp[ai].y;
temp[bi].z += temp[ai].z;
temp[bi].w += temp[ai].w;
}
offset <<= 1;
}
// Something wrong with the results
// write results to global memory
__syncthreads();
if (thid == (numThreads-1)) {
float nbs = temp[n-1].w;
float nbs1 = 1./(nbs*width);
if (nbs == 0) nbs = 1.;
sum[blockId] = make_float4(temp[n-1].x*nbs1, temp[n-1].y*nbs1, 0., nbs); //, nbs);
}
}
//----------------------------------------------------------------------
__global__ void scan_workefficient_5(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// Use more threads by reading floats instead of float4
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
float* tempf = (float*) temp;
float* sumf = (float*) sum;
// blockDim.x == edge (will generalize later)
int numThreads = blockDim.x * blockDim.y;
// if (blockIdx.x > 1000) return;
#if 1
// SOMETHING NOT WORKING
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
//int last_share = 0;
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
#if 0
//int4 seed = make_int4(8,8,0,311); // TEST SEED
int4 seed = seeds[blockIdx.x];
#endif
float* g_idata_f = (float*) g_idata;
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge2; // one thread per float (edge float4's)
int yorig = y - edge2; // edge rows
// align xorig such that xorig is a multiple of 2 (speedup is evident)
//xorig = (xorig >> 1) << 1; // xorig is divisble by 2^1
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
//else return;
int flag1;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
int xid = 4*xorig + threadIdx.x; // measured in floats
//--------------------
// one iteration per row in the square tile
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
__syncthreads();
flag1 = 1;
// need for each of the strings separately
if (xid < 0 || xid >= (WW*4)) flag1 = 0;
int yid1 = yorig + j;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
int arrayid1 = xid + yid1*WW*4; // WW*4 floats
// I MUST ALSO CHECK THE SEED VALUE
tempf[j*4*edge+threadIdx.x] = 0.;
// crashes without this test
if (flag1 != 0) {
tempf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
//tempf[j*4*edge+threadIdx.x] = 0.; // very low overhead
}
//sumf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
//sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
//sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
//sumf[j*4*edge+threadIdx.x] = xorig;
//if (j == 0) {
//sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
//return;
//}
__syncthreads();
} // end of for loop
//return;
__syncthreads();
float widthi = 1./width;
for (int j=0; j < edge; j++) {
__syncthreads();
if (threadIdx.x < edge) {
float f = temp[j*edge+threadIdx.x].w + 0.1; // so that int() works
if (int(f) != seed.w) {
temp[j*edge+threadIdx.x] = make_float4(0.,0.,0.,0.);
} else {
temp[j*edge+threadIdx.x].x = (xorig+threadIdx.x) * widthi;
temp[j*edge+threadIdx.x].y = (yorig+j) * widthi;
temp[j*edge+threadIdx.x].w = 1.;
}
}
}
__syncthreads();
#if 0
for (int j=0; j < edge; j++) {
sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
}
return;
#endif
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
//sumf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
//sumf[j*4*edge+threadIdx.x] = xorig;
//sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
}
return;
#endif
//--------------------
// xorig - edge/2, xorig + edge/2 - 1
// For the 16x16 case (hardcoded), the first pass with 64 threads can
// only handle 1/2 the domain (1024 floats = 16x16x4). The for loop that
// follows had a thread handle two floats at a time, so can only handl
// 1/2 the domain on each pass
// manually treat each half of the domain
int offset = 1;
//====
int tid, j, ai, bi;
int mx = 8;
tid = threadIdx.x >> 2;
j = threadIdx.x - (tid << 2);
for (int outer=0; outer < 3; outer++) { // HARDCODED
for (int k=0; k < mx; k++) {
__syncthreads();
int off = k * 128 * (1 << outer); // HARDCODED
ai = offset*(2*tid+1)-1;
bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j;
bi = (bi << 2) + j;
tempf[bi+off] += tempf[ai+off];
}
mx >> 1;
offset <<= 1;
}
//====
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
//sumf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
//sumf[j*4*edge+threadIdx.x] = xorig;
//sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
}
return;
#endif
#
int sz = edge*edge / 2; // (128 for 64 threads)
#if 1
// build the sum in place up the tree
for (int d = sz>>1; d > 0; d >>= 1) {
__syncthreads();
if (threadIdx.x < d)
{
//int tid = threadIdx.x >> 2; // thread id divided by 4
//int j = threadIdx.x - (tid << 2); // 0,1,2,3
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j;
bi = (bi << 2) + j;
tempf[bi] += tempf[ai];
}
offset <<= 1;
}
#endif
#
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
//sumf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
//sumf[j*4*edge+threadIdx.x] = xorig;
//sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
}
//return;
#endif
// Something wrong with the results
// write results to global memory
__syncthreads();
if (threadIdx.x == (numThreads-1)) {
int el = edge*edge-1;
//int el = 0;
float nbs = temp[el].w;
//float nbs1 = 1./(nbs*width);
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;
//sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, width, nbs);
sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
__global__ void scan_workefficient_6(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// Use more threads by reading floats instead of float4
// Remove bank conflicts (decrease serialized_warps)
// Contract (as far as the code shows): one seed per block; a 1D block of
// edge*4 threads loads an edge x edge tile of float4's (as floats) into
// dynamic shared memory, masks out pixels whose .w does not match the
// seed id, then reduces the tile to a centroid written to sum[blockIdx.x].
// NOTE(review): TMPF/MAKE_INT4/LOG_NUM_BANKS are macros defined elsewhere
// (TMPF presumably adds bank-conflict padding) -- confirm before reuse.
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
float* tempf = (float*) temp;
float* sumf = (float*) sum;
// blockDim.x == edge (will generalize later)
int numThreads = blockDim.x * blockDim.y;
// if (blockIdx.x > 1000) return;
#if 1
// SOMETHING NOT WORKING
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
//int last_share = 0;
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
#if 0
//int4 seed = make_int4(8,8,0,311); // TEST SEED
int4 seed = seeds[blockIdx.x];
#endif
float* g_idata_f = (float*) g_idata;
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge2; // one thread per float (edge float4's)
int yorig = y - edge2; // edge rows
// align xorig such that xorig is a multiple of 2 (speedup is evident)
//xorig = (xorig >> 1) << 1; // xorig is divisble by 2^1
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
//else return;
int flag1;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
int xid = 4*xorig + threadIdx.x; // measured in floats
//--------------------
// one iteration per row in the square tile
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
__syncthreads();
flag1 = 1;
// need for each of the strings separately
if (xid < 0 || xid >= (WW*4)) flag1 = 0;
int yid1 = yorig + j;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
int arrayid1 = xid + yid1*WW*4; // WW*4 floats
// I MUST ALSO CHECK THE SEED VALUE
TMPF(j*4*edge+threadIdx.x) = 0.;
// crashes without this test
if (flag1 != 0) {
TMPF(j*4*edge+threadIdx.x) = g_idata_f[arrayid1];
//tempf[j*4*edge+threadIdx.x] = 0.; // very low overhead
}
//sumf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
//sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
//sumf[j*4*edge+threadIdx.x] = tempf[j*4*edge+threadIdx.x];
//sumf[j*4*edge+threadIdx.x] = xorig;
//if (j == 0) {
//sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
//return;
//}
__syncthreads();
} // end of for loop
//return;
__syncthreads();
float widthi = 1./width;
// Keep only tile pixels belonging to this seed: matching pixels become
// (x, y, z, 1) contributions, non-matching ones are zeroed.
for (int j=0; j < edge; j++) {
__syncthreads();
if (threadIdx.x < edge) {
float f = temp[j*edge+threadIdx.x].w + 0.1; // so that int() works
if (int(f) != seed.w) {
temp[j*edge+threadIdx.x] = make_float4(0.,0.,0.,0.);
} else {
temp[j*edge+threadIdx.x].x = (xorig+threadIdx.x) * widthi;
temp[j*edge+threadIdx.x].y = (yorig+j) * widthi;
temp[j*edge+threadIdx.x].w = 1.;
}
}
}
__syncthreads();
#if 0
for (int j=0; j < edge; j++) {
sumf[j*4*edge+threadIdx.x] = TMPF(j*4*edge+threadIdx.x);
}
return;
#endif
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
//sumf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
sumf[j*4*edge+threadIdx.x] = TMPF(j*4*edge+threadIdx.x);
//sumf[j*4*edge+threadIdx.x] = xorig;
//sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
}
return;
#endif
//--------------------
// xorig - edge/2, xorig + edge/2 - 1
// For the 16x16 case (hardcoded), the first pass with 64 threads can
// only handle 1/2 the domain (1024 floats = 16x16x4). The for loop that
// follows had a thread handle two floats at a time, so can only handl
// 1/2 the domain on each pass
// manually treat each half of the domain
int offset = 1;
//====
int mx = 8;
for (int outer=0; outer < 3; outer++) { // HARDCODED
for (int k=0; k < mx; k++) {
__syncthreads();
int off = k * 128 * (1 << outer); // HARDCODED
int tid = threadIdx.x >> 2;
int j = threadIdx.x - (tid << 2);
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j;
bi = (bi << 2) + j;
TMPF(bi+off) += TMPF(ai+off);
}
// fix: was "mx >> 1;", a statement with no effect, so mx stayed 8 and
// outer iterations 1 and 2 ran k up to 7 with off up to 7*512, indexing
// past the 1024-float tile in shared memory. Halving mx (8, 4, 2) keeps
// each pass covering exactly the 1024-float tile.
mx >>= 1;
offset <<= 1;
}
//====
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
//sumf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
sumf[j*4*edge+threadIdx.x] = TMPF(j*4*edge+threadIdx.x);
//sumf[j*4*edge+threadIdx.x] = xorig;
//sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
}
return;
#endif
int sz = edge*edge / 2; // (128 for 64 threads)
#if 1
// build the sum in place up the tree
for (int d = sz>>1; d > 0; d >>= 1) {
__syncthreads();
if (threadIdx.x < d)
{
int tid = threadIdx.x >> 2; // thread id divided by 4
int j = threadIdx.x - (tid << 2); // 0,1,2,3
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j;
bi = (bi << 2) + j;
TMPF(bi) += TMPF(ai);
}
offset <<= 1;
}
#endif
#
#if 0
for (int j=0; j < edge; j++) { // the loop added 2 registers (could be unrolled)
//sumf[j*4*edge+threadIdx.x] = g_idata_f[arrayid1];
sumf[j*4*edge+threadIdx.x] = TMPF(j*4*edge+threadIdx.x);
//sumf[j*4*edge+threadIdx.x] = xorig;
//sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
}
return;
#endif
// Something wrong with the results
// write results to global memory: last thread normalizes the accumulated
// (x, y, -, count) totals into a centroid; count <= 0.5 means no pixel
// matched the seed, so avoid dividing by zero.
__syncthreads();
if (threadIdx.x == (numThreads-1)) {
int el = edge*edge-1;
//int el = 0;
float nbs = temp[el].w;
//float nbs1 = 1./(nbs*width);
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;
//sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, width, nbs);
sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
__global__ void scan_workefficient_7(float4 *g_idata, float4 *sum, int4* seeds, int n, int edge, int width)
// More efficient version of scan_workefficient_2 (more threads + remove non-coalesced reads)
// Use more threads by reading floats instead of float4
// Contract (as far as the code shows): one seed per block; a 2D block
// (threadIdx.y strides the tile rows, 4 rows per pass) loads an
// edge x edge tile of float4's into dynamic shared memory, zeroes pixels
// whose .w does not match the seed id, reduces in place, and writes the
// centroid to sum[blockIdx.x].
// NOTE(review): MAKE_INT4/LOG_NUM_BANKS are macros defined elsewhere;
// the final write runs for every threadIdx.y with threadIdx.x == blockDim.x-1
// (all write the same value) -- confirm that is intended.
{
// edge=16, 64 threads: scan each row, one float per thread
// Dynamically allocated shared memory for scan kernels
extern __shared__ float4 temp[];
float* tempf = (float*) temp;
float* sumf = (float*) sum;
// blockDim.x == edge (will generalize later)
int numThreads = blockDim.x * blockDim.y;
//if (blockIdx.x != 11) return; // block 13 has serial errors
#if 1
// SOMETHING NOT WORKING
int last_share = edge*edge + ((edge*edge) >> LOG_NUM_BANKS);
//int last_share = 0;
if (threadIdx.x == 0) {
int4 ss = seeds[blockIdx.x];
temp[last_share] = make_float4(ss.x+0.1,ss.y+0.1,ss.z+0.1,ss.w+0.1);
}
__syncthreads();
int4 seed = MAKE_INT4(temp[last_share]); // is int correct? Or must add 0.5?
__syncthreads();
#endif
#if 0
//int4 seed = make_int4(8,8,0,311); // TEST SEED
int4 seed = seeds[blockIdx.x];
#endif
float* g_idata_f = (float*) g_idata;
// get data from global memory (should be coalesced)
int x = seed.x;
int y = seed.y;
// edge should be part of the seed datastructure (per block)
int edge2 = edge >> 2;
int xorig = x - edge2; // one thread per float (edge float4's)
int yorig = y - edge2; // edge rows
// align xorig such that xorig is a multiple of 2 (speedup is evident)
//xorig = (xorig >> 1) << 1; // xorig is divisble by 2^1
int shift = xorig - ((xorig >> 2) << 2);
if (shift == 1) xorig -= 1;
else if (shift == 2) xorig += 2;
else if (shift == 3) xorig += 1;
//else return;
int flag1;
int WW = width; // array width (argument) (in float4)
int HH = WW; // height of flat texture // MUST READ AS ARGUMENT
//--------------------
// one iteration per row in the square tile
for (int j=0; j < edge; j+=4) { // the loop added 2 registers (could be unrolled)
__syncthreads();
//
int subtid = j >> 4; // 0, 1, ..., numThreads/4
int subrow = j - subtid;
//
flag1 = 1;
//
// // need for each of the strings separately
int xid = 4*xorig + threadIdx.x; // measured in floats
if (xid < 0 || xid >= (WW*4)) flag1 = 0;
//
int yid1 = yorig + j + threadIdx.y;
if (yid1 < 0 || yid1 >= HH) flag1 = 0;
//
int arrayid1 = xid + yid1*WW*4; // WW*4 floats
//
// // I MUST ALSO CHECK THE SEED VALUE
//
//
int jj = j+threadIdx.y;
tempf[jj*4*edge+threadIdx.x] = 0.;
//
// // crashes without this test
if (flag1 != 0) {
tempf[jj*4*edge+threadIdx.x] = g_idata_f[arrayid1];
//tempf[jj*4*edge+threadIdx.x] = 0.; // very low overhead
}
//
//
// //sumf[jj*4*edge+threadIdx.x] = g_idata_f[arrayid1];
// //sumf[jj*4*edge+threadIdx.x] = tempf[jj*4*edge+threadIdx.x];
// //sumf[jj*4*edge+threadIdx.x] = tempf[jj*4*edge+threadIdx.x];
// //sumf[jj*4*edge+threadIdx.x] = xorig;
//
// //if (j == 0) {
// //sum[threadIdx.x] = make_float4(seed.x,seed.y,seed.z,seed.w);
// //return;
// //}
//
__syncthreads();
} // end of for loop
//
//
__syncthreads();
//
float widthi = 1./width;
int thread_id = threadIdx.x + blockDim.x * threadIdx.y;
// use float4
// not the problem
#if 0
// 256 threads
int tid = thread_id >> 2;
int j4 = thread_id - (tid << 2);
// fix: was "int (int j=0; ...)" -- a typo for "for" that would not
// compile if this debug block were ever enabled.
for (int j=0; j < 4; j++) {
__syncthreads();
if (j4 == 3) {
float f = tempf[j*256 + tid + j4] + 0.1;
if (int(f) != seed.w) {
//tempf[j*256+thread_id] = 0.;
temp[j*64+threadIdx.x] = make_float4(0.,0.,0.,0.);
}
}
}
#endif
// use float4
// not the problem
#if 1
for (int j=0; j < edge; j++) { // takes 1.7 ms
__syncthreads();
if (threadIdx.x < edge && threadIdx.y == 0) {
int tid = threadIdx.x;
float f = temp[j*16+tid].w + 0.1; // so that int() works
if (int(f) != seed.w) {
temp[j*16+tid] = make_float4(0.,0.,0.,0.); // cause of serialization
} // else {
//float4 g;
// Will do this later
//g.x = (xorig+threadIdx.x) * widthi;
//g.y = (yorig+j) * widthi;
//g.z = 0.;
//g.w = 1.;
//temp[j*edge+threadIdx.x] = g;
// }
}
}
#endif
// use float
#if 0
for (int j=0; j < edge; j++) { // takes 1.7 ms
__syncthreads();
if (threadIdx.x < edge && threadIdx.y == 0) {
int tid = thread_id >> 2;
int j = thread_id - (tid << 2);
float f = tempf[4*j*16+tid+j] + 0.1; // so that int() works
if (int(f) != seed.w) {
tempf[4*j*16+tid] = 0.;
tempf[4*j*16+tid+1] = 0.;
tempf[4*j*16+tid+2] = 0.;
tempf[4*j*16+tid+3] = 0.;
} else {
float4 g;
// Will do this later
//g.x = (xorig+threadIdx.x) * widthi;
//g.y = (yorig+j) * widthi;
//g.z = 0.;
//g.w = 1.;
//temp[jj*edge+threadIdx.x] = g;
}
}
}
#endif
__syncthreads();
//return;
#if 0
for (int j=0; j < 4; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*256+thread_id] = tempf[j*256+thread_id];
}
return;
#endif
// //--------------------
//
//// xorig - edge/2, xorig + edge/2 - 1
//
//// For the 16x16 case (hardcoded), the first pass with 64 threads can
//// only handle 1/2 the domain (1024 floats = 16x16x4). The for loop that
//// follows had a thread handle two floats at a time, so can only handl
//// 1/2 the domain on each pass
//
//// manually treat each half of the domain
//
int offset = 1;
// //====
//
int tid, j4, ai, bi;
// for (int j=0; j < 4; j++) { // the loop added 2 registers (could be unrolled)
//sumf[j*256+thread_id] = tempf[j*256+thread_id];
//}
//return;
tid = thread_id >> 2;
j4 = thread_id - (tid << 2);
for (int k=0; k < 2; k++) {
__syncthreads();
int off = k * 512;
ai = offset*(2*tid+1)-1;
bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j4;
bi = (bi << 2) + j4;
tempf[bi+off] += tempf[ai+off];
}
offset <<= 1;
//====
__syncthreads();
#if 0
for (int j=0; j < 4; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*256+thread_id] = tempf[j*256+thread_id];
}
return;
#endif
//
//
int sz = 512; // * edge*edge; // (512 for 256 threads)
//return;
//
#if 1
// build the sum in place up the tree
for (int d = sz>>3; d > 0; d >>= 1) {
__syncthreads();
if (thread_id < (d*4)) // 4 subthreads per thread: 64*4 = 256
{
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
ai = (ai << 2) + j4;
bi = (bi << 2) + j4;
tempf[bi] += tempf[ai];
}
offset <<= 1;
//if (d == 0) break;
}
#endif
#if 0
for (int j=0; j < 4; j++) { // the loop added 2 registers (could be unrolled)
sumf[j*256+thread_id] = tempf[j*256+thread_id];
}
return;
#endif
// Something wrong with the results
// write results to global memory
__syncthreads();
if (threadIdx.x == (blockDim.x-1)) {
int el = edge*edge-1;
//int el = 0;
float nbs = temp[el].w;
//float nbs1 = 1./(nbs*width);
float nbs1 = 1./(nbs);
if (nbs <= 0.5) nbs1 = 1.;
//sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, width, nbs);
sum[blockIdx.x] = make_float4(temp[el].x*nbs1, temp[el].y*nbs1, 0., nbs); // orig
}
}
//----------------------------------------------------------------------
#endif // #ifndef _SCAN_WORKEFFICIENT_KERNEL_H_
|
86fd1e92b4e5483852ff9b8f71826d186abbe50d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdint.h>
__device__ __forceinline__
int getLinearIndex(int row, int col, int slice, int nRows, int nCols){
// Column-major addressing: color slices are stacked back to back, and
// inside a slice each column holds nRows consecutive elements.
const int sliceOffset = slice * nRows * nCols;
const int colOffset = col * nRows;
return sliceOffset + colOffset + row;
}
__device__ __forceinline__
double getTileAverage(int row, int col, int slice, int tileSize, int imageSize, int* image){
// Average the tileSize x tileSize pixel tile whose top-left corner is
// (row, col) in the given color slice of a column-major
// imageSize x imageSize image. Caller must ensure the tile fits inside
// the image (no bounds checks here).
int i, j;
double sum = 0.0;
double size = tileSize * tileSize;
for(i = 0; i < tileSize; i++){
for(j = 0; j < tileSize; j++){
int tempRow = row + i;
int tempCol = col + j;
int tempLinearIndex = getLinearIndex(tempRow, tempCol, slice, imageSize, imageSize);
sum = sum + image[tempLinearIndex];
if(slice == 0){
// fix: sum is a double; printing it with %d is undefined behavior
printf("IMAGE VALUE: %d ; SUM: %f\n", image[tempLinearIndex], sum);
}
}
}
// fix: %d -> %f for the double sum (was undefined behavior)
printf("SUM: %f \n", sum);
return sum/size;
}
//tileSize = side length of tile
//numTiles = num of tiles per side
//threadsPerBlock = fixed at 16
template <typename T>
__device__ __forceinline__
void mosaic(T* image, const T* reds, const T* greens, const T* blues, int numSamples, int* nearestTiles, int tileSize,
int numTiles, int threadsPerBlock){
// One thread per tile: average the tile's R/G/B channels and record the
// index of the sample whose average color is nearest (squared Euclidean
// distance in RGB) into nearestTiles (row-major tile order).
// NOTE(review): getTileAverage takes int*, so this template is only valid
// for T == int -- confirm no other instantiations exist.
//Calculate what tile this is
int tileRowIdx = blockIdx.x * threadsPerBlock + threadIdx.x;
int tileColIdx = blockIdx.y * threadsPerBlock + threadIdx.y;
//Calculate top-left pixel of current tile,
int pixelRow = tileRowIdx * tileSize;
int pixelCol = tileColIdx * tileSize;
//targetImageSize = side length of target image in pixels
int targetImageSize = tileSize * numTiles;
if(pixelRow >= targetImageSize || pixelCol >= targetImageSize){
return;
}
double avgR = getTileAverage(pixelRow, pixelCol, 0, tileSize, targetImageSize, image);
double avgG = getTileAverage(pixelRow, pixelCol, 1, tileSize, targetImageSize, image);
double avgB = getTileAverage(pixelRow, pixelCol, 2, tileSize, targetImageSize, image);
// fix: the averages are doubles; printing them with %d is undefined behavior
printf("Tuple of averages: %f, %f, %f \n", avgR, avgG, avgB);
// minDistance == -1 is the "not yet set" sentinel for the first sample.
double minDistance = -1;
int minDistanceIndex = -1;
int i;
for(i = 0; i < numSamples; i = i+1){
double tempDistance = fabs(pow(avgR-reds[i], 2) + pow(avgB-blues[i], 2) + pow(avgG-greens[i], 2));
if(fabs(tempDistance) < minDistance || minDistance == -1){
minDistance = tempDistance;
minDistanceIndex = i;
}
}
//Tiles are indexed in row-major order
int tileLinearIndex = tileRowIdx * numTiles + tileColIdx;
nearestTiles[tileLinearIndex] = minDistanceIndex;
return;
}
__global__
// Kernel entry point: delegates the per-tile work to the templated mosaic()
// helper (one thread per tile).
// NOTE(review): indexing inside mosaic() assumes blockDim.x == blockDim.y ==
// threadsPerBlock -- confirm at the launch site.
void mosaic_cuda_double(int* nearestTile, int* image, const int* red, const int* green, const int* blue, int numSamples,
int tileSize, int numTiles, int threadsPerBlock){
mosaic(image, red, green, blue, numSamples, nearestTile, tileSize, numTiles, threadsPerBlock);
return;
}
| 86fd1e92b4e5483852ff9b8f71826d186abbe50d.cu | #include <math.h>
#include <stdio.h>
#include <stdint.h>
__device__ __forceinline__
int getLinearIndex(int row, int col, int slice, int nRows, int nCols){
// Column-major addressing: color slices are stacked back to back, and
// inside a slice each column holds nRows consecutive elements.
const int sliceOffset = slice * nRows * nCols;
const int colOffset = col * nRows;
return sliceOffset + colOffset + row;
}
__device__ __forceinline__
double getTileAverage(int row, int col, int slice, int tileSize, int imageSize, int* image){
// Average the tileSize x tileSize pixel tile whose top-left corner is
// (row, col) in the given color slice of a column-major
// imageSize x imageSize image. Caller must ensure the tile fits inside
// the image (no bounds checks here).
int i, j;
double sum = 0.0;
double size = tileSize * tileSize;
for(i = 0; i < tileSize; i++){
for(j = 0; j < tileSize; j++){
int tempRow = row + i;
int tempCol = col + j;
int tempLinearIndex = getLinearIndex(tempRow, tempCol, slice, imageSize, imageSize);
sum = sum + image[tempLinearIndex];
if(slice == 0){
// fix: sum is a double; printing it with %d is undefined behavior
printf("IMAGE VALUE: %d ; SUM: %f\n", image[tempLinearIndex], sum);
}
}
}
// fix: %d -> %f for the double sum (was undefined behavior)
printf("SUM: %f \n", sum);
return sum/size;
}
//tileSize = side length of tile
//numTiles = num of tiles per side
//threadsPerBlock = fixed at 16
template <typename T>
__device__ __forceinline__
void mosaic(T* image, const T* reds, const T* greens, const T* blues, int numSamples, int* nearestTiles, int tileSize,
int numTiles, int threadsPerBlock){
// One thread per tile: average the tile's R/G/B channels and record the
// index of the sample whose average color is nearest (squared Euclidean
// distance in RGB) into nearestTiles (row-major tile order).
// NOTE(review): getTileAverage takes int*, so this template is only valid
// for T == int -- confirm no other instantiations exist.
//Calculate what tile this is
int tileRowIdx = blockIdx.x * threadsPerBlock + threadIdx.x;
int tileColIdx = blockIdx.y * threadsPerBlock + threadIdx.y;
//Calculate top-left pixel of current tile,
int pixelRow = tileRowIdx * tileSize;
int pixelCol = tileColIdx * tileSize;
//targetImageSize = side length of target image in pixels
int targetImageSize = tileSize * numTiles;
if(pixelRow >= targetImageSize || pixelCol >= targetImageSize){
return;
}
double avgR = getTileAverage(pixelRow, pixelCol, 0, tileSize, targetImageSize, image);
double avgG = getTileAverage(pixelRow, pixelCol, 1, tileSize, targetImageSize, image);
double avgB = getTileAverage(pixelRow, pixelCol, 2, tileSize, targetImageSize, image);
// fix: the averages are doubles; printing them with %d is undefined behavior
printf("Tuple of averages: %f, %f, %f \n", avgR, avgG, avgB);
// minDistance == -1 is the "not yet set" sentinel for the first sample.
double minDistance = -1;
int minDistanceIndex = -1;
int i;
for(i = 0; i < numSamples; i = i+1){
double tempDistance = fabs(pow(avgR-reds[i], 2) + pow(avgB-blues[i], 2) + pow(avgG-greens[i], 2));
if(fabs(tempDistance) < minDistance || minDistance == -1){
minDistance = tempDistance;
minDistanceIndex = i;
}
}
//Tiles are indexed in row-major order
int tileLinearIndex = tileRowIdx * numTiles + tileColIdx;
nearestTiles[tileLinearIndex] = minDistanceIndex;
return;
}
__global__
// Kernel entry point: delegates the per-tile work to the templated mosaic()
// helper (one thread per tile).
// NOTE(review): indexing inside mosaic() assumes blockDim.x == blockDim.y ==
// threadsPerBlock -- confirm at the launch site.
void mosaic_cuda_double(int* nearestTile, int* image, const int* red, const int* green, const int* blue, int numSamples,
int tileSize, int numTiles, int threadsPerBlock){
mosaic(image, red, green, blue, numSamples, nearestTile, tileSize, numTiles, threadsPerBlock);
return;
}
|
d1198ad2f93b19ce7ab784cb80e1658b2419d599.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "zone_map.h"
void process_error(int severity, string err); // this should probably live in a utils header file
bool fh_equal_to(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
bool fh_less(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
bool fh_greater(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
bool fh_greater_equal_to(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
bool fh_less_equal_to(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
char host_logical_and(char column1, char column2)
{
// Combine two zone-map verdicts under logical AND:
//   'N' (no rows qualify) if either side rejects everything,
//   'A' (all rows qualify) only when both sides accept everything,
//   'R' (must recheck row by row) otherwise.
if (column1 == 'N' || column2 == 'N')
return 'N';
if (column1 == 'A' && column2 == 'A')
return 'A';
return 'R';
}
char host_logical_or(char column1, char column2)
{
// Combine two zone-map verdicts under logical OR:
//   'A' only when both sides already accept everything,
//   'N' only when both sides reject everything,
//   'R' (recheck) otherwise.
if (column1 == 'N' && column2 == 'N')
return 'N';
if (column1 == 'A' && column2 == 'A')
return 'A';
return 'R';
}
char host_compare(int_type s, int_type d, int_type op_type)
{
// Evaluate "d OP s" for scalar operands. op_type codes (per the original
// comments): 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// Returns 'A' when the predicate holds, 'N' when it does not.
char res = 'N';
if (op_type == 2) { // >
if (d > s) res = 'A';
}
else if (op_type == 1) { // <
if (d < s) res = 'A';
}
else if (op_type == 6) { // >=
if (d >= s) res = 'A';
}
else if (op_type == 5) { // <=
if (d <= s) res = 'A';
}
else if (op_type == 4) { // =
if (d == s) res = 'A';
}
else { // !=
// fix: in the original else-if chain, a failed predicate for ops
// 1/2/4/5/6 fell through to this '!=' branch and returned 'A'
// whenever d != s (e.g. op 2 with d < s wrongly answered 'A').
// Each operator now answers 'N' when its own predicate fails.
if (d != s) res = 'A';
}
return res;
}
char host_compare(float_type s, float_type d, int_type op_type)
{
// Evaluate "d OP s" for scalar floats with the EPSILON tolerance.
// op_type codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// Returns 'A' when the predicate holds, 'N' when it does not.
char res = 'N';
// Approximate equality, shared by the >=, <=, = and != branches.
const bool approx_equal = ((d-s) < EPSILON) && ((d-s) > -EPSILON);
if (op_type == 2) { // >
if ((d-s) > EPSILON) res = 'A';
}
else if (op_type == 1) { // <
if ((s-d) > EPSILON) res = 'A';
}
else if (op_type == 6) { // >=
// fix: original wrote "op_type == 6 && A || B", which && / || precedence
// parses as "(op_type == 6 && A) || B", so any operator answered 'A'
// for approximately equal operands (wrong for '<', '>' and '!=').
if (((d-s) > EPSILON) || approx_equal) res = 'A';
}
else if (op_type == 5) { // <=  (same precedence fix as above)
if (((s-d) > EPSILON) || approx_equal) res = 'A';
}
else if (op_type == 4) { // =
if (approx_equal) res = 'A';
}
else { // !=
// fix: previously a failed comparison for ops 1/2/4/5/6 fell through
// to this branch; now it is reached only for the '!=' opcode.
if (!approx_equal) res = 'A';
}
return res;
}
char host_compare(int_type* column1, int_type d, int_type op_type)
{
// Zone-map test of a segment's [min, max] range (column1[0], column1[1])
// against the scalar d. Returns 'A' when every row must satisfy the
// predicate, 'N' when none can, and 'R' when the segment must be scanned.
const int_type lo = column1[0];
const int_type hi = column1[1];
char res = 'R';
//cout << "CMP " << lo << " " << hi << " with " << d << endl;
switch (op_type) {
case 2: // > : all pass if min > d, none if max <= d
if (hi <= d) res = 'N';
else if (lo > d) res = 'A';
break;
case 1: // < : all pass if max < d, none if min >= d
if (lo >= d) res = 'N';
else if (hi < d) res = 'A';
break;
case 6: // >=
if (hi < d) res = 'N';
else if (lo >= d) res = 'A';
break;
case 5: // <=
if (lo > d) res = 'N';
else if (hi <= d) res = 'A';
break;
case 4: // = : only when the range collapses to exactly d
if (lo == d && hi == d) res = 'A';
break;
}
//cout << "res " << res << endl;
return res;
}
char host_compare(float_type* column1, float_type d, int_type op_type)
{
// Zone-map test of a segment's float [min, max] range against scalar d,
// using the EPSILON-tolerant fh_* comparators.
// 'A' = all rows pass, 'N' = none pass, 'R' = must recheck row by row.
const float_type lo = column1[0];
const float_type hi = column1[1];
char res = 'R';
//cout << "CMP " << lo << " " << hi << " with " << d << endl;
if (op_type == 2) { // > : none if max <= d, all if min > d
if (fh_less_equal_to(hi, d))
res = 'N';
else if (fh_greater(lo, d))
res = 'A';
}
else if (op_type == 1) { // < : all if max < d, none if min >= d
if (fh_less(hi, d))
res = 'A';
else if (fh_greater_equal_to(lo, d))
res = 'N';
}
else if (op_type == 6) { // >= : all if min >= d, none if max < d
if (fh_greater_equal_to(lo, d))
res = 'A';
else if (fh_less(hi, d))
res = 'N';
}
else if (op_type == 5) { // <= : all if max <= d, none if min > d
if (fh_less_equal_to(hi, d))
res = 'A';
else if (fh_greater(lo, d))
res = 'N';
}
else if (op_type == 4 && fh_equal_to(lo, d) && fh_equal_to(hi, d)) { // =
res = 'A';
}
//cout << "res " << res << endl;
return res;
}
char host_compare(int_type* column1, int_type* column2, int_type op_type)
{
// Zone-map test of two segments' [min, max] ranges against each other:
// column1 = (min1, max1), column2 = (min2, max2).
// 'A' = predicate holds for every row pair, 'N' = for none, 'R' = recheck.
const int_type min1 = column1[0], max1 = column1[1];
const int_type min2 = column2[0], max2 = column2[1];
char res = 'R';
switch (op_type) {
case 2: // > : all if min1 > max2, none if max1 <= min2
if (min1 > max2) res = 'A';
else if (max1 <= min2) res = 'N';
break;
case 1: // < : all if max1 < min2, none if min1 >= max2
if (max1 < min2) res = 'A';
else if (min1 >= max2) res = 'N';
break;
case 6: // >=
if (min1 >= max2) res = 'A';
else if (max1 < min2) res = 'N';
break;
case 5: // <=
if (max1 <= min2) res = 'A';
else if (min1 > max2) res = 'N';
break;
case 4: // = : both ranges collapse to one identical value
if (min1 == max2 && max1 == min2) res = 'A';
break;
}
return res;
}
char host_compare(float_type* column1, float_type* column2, int_type op_type)
{
// Zone-map test of two float [min, max] ranges -- column1 = (min1, max1),
// column2 = (min2, max2) -- using the EPSILON-tolerant fh_* comparators.
// 'A' = predicate holds for every row pair, 'N' = for none, 'R' = recheck.
char res = 'R';
if (op_type == 2) { // > : all if min1 > max2, none if max1 <= min2
if(fh_greater(column1[0],column2[1]))
res = 'A';
else if(fh_less_equal_to(column1[1],column2[0]))
res = 'N';
}
else if (op_type == 1) { // < : all if max1 < min2, none if min1 >= max2
if(fh_less(column1[1],column2[0]))
res = 'A';
else if(fh_greater_equal_to(column1[0],column2[1]))
res = 'N';
}
else if (op_type == 6) { // >= : all if min1 >= max2, none if max1 < min2
// fix: the 'A' test compared max1 >= min2 (mere overlap), wrongly
// reporting "all rows pass"; it must require min1 >= max2, mirroring
// the int_type* overload.
if(fh_greater_equal_to(column1[0],column2[1]))
res = 'A';
else if(fh_less(column1[1],column2[0]))
res = 'N';
}
else if (op_type == 5) { // <= : all if max1 <= min2, none if min1 > max2
if(fh_less_equal_to(column1[1],column2[0]))
res = 'A';
else if(fh_greater(column1[0],column2[1]))
res = 'N';
}
else if (op_type == 4 && fh_equal_to(column1[0], column2[1]) && fh_equal_to(column1[1],column2[0])) // =
res = 'A';
return res;
}
char host_compare(float_type* column1, int_type* column2, int_type op_type)
{
// Zone-map test of a float [min, max] range against an int [min, max]
// range (column2 promoted to float_type), EPSILON-tolerant comparisons.
// 'A' = predicate holds for every row pair, 'N' = for none, 'R' = recheck.
char res = 'R';
if (op_type == 2) { // > : all if min1 > max2, none if max1 <= min2
if(fh_greater(column1[0],(float_type)column2[1]))
res = 'A';
else if(fh_less_equal_to(column1[1],(float_type)column2[0]))
res = 'N';
}
else if (op_type == 1) { // < : all if max1 < min2, none if min1 >= max2
if(fh_less(column1[1],(float_type)column2[0]))
res = 'A';
else if(fh_greater_equal_to(column1[0],(float_type)column2[1]))
res = 'N';
}
else if (op_type == 6) { // >= : all if min1 >= max2, none if max1 < min2
// fix: the 'A' test compared max1 >= min2 (mere overlap), wrongly
// reporting "all rows pass"; it must require min1 >= max2, mirroring
// the int_type* overload.
if(fh_greater_equal_to(column1[0],(float_type)column2[1]))
res = 'A';
else if(fh_less(column1[1],(float_type)column2[0]))
res = 'N';
}
else if (op_type == 5) { // <= : all if max1 <= min2, none if min1 > max2
if(fh_less_equal_to(column1[1],(float_type)column2[0]))
res = 'A';
else if(fh_greater(column1[0],(float_type)column2[1]))
res = 'N';
}
else if (op_type == 4 && fh_equal_to(column1[0],(float_type) column2[1]) && fh_equal_to(column1[1],(float_type)column2[0])) // =
res = 'A';
return res;
}
// Apply an arithmetic op between an int [min, max] pair (promoted to float)
// and a float [min, max] pair, element by element. Returns a freshly
// malloc'ed 2-element array; the caller owns (and must free) it.
// NOTE(review): in the reverse == 0 branch MINUS/DIV compute
// column2 OP column1, while the int*/int* and float*/float* overloads
// compute column1 OP column2 for reverse == 0 -- possibly intentional
// (operand order on the expression stack), TODO confirm against callers.
float_type* host_op(int_type* column1, float_type* column2, string op_type, int reverse)
{
float_type* temp = (float_type*)malloc(2*float_size);
// Promote the integer endpoints to float before operating.
temp[0] = (float_type)column1[0];
temp[1] = (float_type)column1[1];
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
temp[0] = temp[0] * column2[0];
temp[1] = temp[1] * column2[1];
}
else if (op_type.compare("ADD") == 0) {
temp[0] = temp[0] + column2[0];
temp[1] = temp[1] + column2[1];
}
else if (op_type.compare("MINUS") == 0) {
// column2 - column1 (see NOTE above about operand order)
temp[0] = column2[0] - temp[0];
temp[1] = column2[1] - temp[1];
}
else {
// DIV: column2 / column1
temp[0] = column2[0] / temp[0];
temp[1] = column2[1] / temp[1];
}
}
else {
if (op_type.compare("MUL") == 0) {
temp[0] = temp[0] * column2[0];
temp[1] = temp[1] * column2[1];
}
else if (op_type.compare("ADD") == 0) {
temp[0] = temp[0] + column2[0];
temp[1] = temp[1] + column2[1];
}
else if (op_type.compare("MINUS") == 0) {
// column1 - column2
temp[0] = temp[0] - column2[0];
temp[1] = temp[1] - column2[1];
}
else {
// DIV: column1 / column2
temp[0] = temp[0] / column2[0];
temp[1] = temp[1] / column2[1];
}
};
return temp;
}
int_type* host_op(int_type* column1, int_type* column2, string op_type, int reverse)
{
// Apply the arithmetic op to two [min, max] pairs, element by element.
// reverse == 0 evaluates column1 OP column2; otherwise the operands are
// swapped for the non-commutative ops (MINUS, DIV).
// Returns a freshly malloc'ed 2-element array owned by the caller.
int_type* temp = (int_type*)malloc(2*int_size);
for (int k = 0; k < 2; k++) {
if (op_type.compare("MUL") == 0)
temp[k] = column1[k] * column2[k];
else if (op_type.compare("ADD") == 0)
temp[k] = column1[k] + column2[k];
else if (op_type.compare("MINUS") == 0)
temp[k] = (reverse == 0) ? (column1[k] - column2[k]) : (column2[k] - column1[k]);
else // DIV
temp[k] = (reverse == 0) ? (column1[k] / column2[k]) : (column2[k] / column1[k]);
}
return temp;
}
float_type* host_op(float_type* column1, float_type* column2, string op_type, int reverse)
{
// Apply the arithmetic op to two float [min, max] pairs, element by
// element. reverse == 0 evaluates column1 OP column2; otherwise the
// operands are swapped for the non-commutative ops (MINUS, DIV).
// Returns a freshly malloc'ed 2-element array owned by the caller.
float_type* temp = (float_type*)malloc(2*float_size);
for (int k = 0; k < 2; k++) {
if (op_type.compare("MUL") == 0)
temp[k] = column1[k] * column2[k];
else if (op_type.compare("ADD") == 0)
temp[k] = column1[k] + column2[k];
else if (op_type.compare("MINUS") == 0)
temp[k] = (reverse == 0) ? (column1[k] - column2[k]) : (column2[k] - column1[k]);
else // DIV
temp[k] = (reverse == 0) ? (column1[k] / column2[k]) : (column2[k] / column1[k]);
}
return temp;
}
int_type* host_op(int_type* column1, int_type d, string op_type, int reverse)
{
// Apply the arithmetic op between a [min, max] pair and the scalar d,
// element by element. reverse == 0 evaluates column1 OP d; otherwise the
// operands are swapped for the non-commutative ops (MINUS, DIV).
// Returns a freshly malloc'ed 2-element array owned by the caller.
int_type* temp = (int_type*)malloc(2*int_size);
for (int k = 0; k < 2; k++) {
if (op_type.compare("MUL") == 0)
temp[k] = column1[k] * d;
else if (op_type.compare("ADD") == 0)
temp[k] = column1[k] + d;
else if (op_type.compare("MINUS") == 0)
temp[k] = (reverse == 0) ? (column1[k] - d) : (d - column1[k]);
else // DIV
temp[k] = (reverse == 0) ? (column1[k] / d) : (d / column1[k]);
}
return temp;
}
float_type* host_op(int_type* column1, float_type d, string op_type, int reverse)
{
// Promote the integer [min, max] pair to float, then apply the op against
// the scalar d, element by element. reverse == 0 evaluates column OP d;
// otherwise the operands are swapped for MINUS and DIV.
// Returns a freshly malloc'ed 2-element array owned by the caller.
float_type* result = (float_type*)malloc(2*float_size);
for (int k = 0; k < 2; k++) {
const float_type v = (float_type)column1[k];
if (op_type.compare("MUL") == 0)
result[k] = v * d;
else if (op_type.compare("ADD") == 0)
result[k] = v + d;
else if (op_type.compare("MINUS") == 0)
result[k] = (reverse == 0) ? (v - d) : (d - v);
else // DIV
result[k] = (reverse == 0) ? (v / d) : (d / v);
}
return result;
}
float_type* host_op(float_type* column1, float_type d, string op_type,int reverse)
{
// Apply the arithmetic op between a float [min, max] pair and the scalar
// d, element by element. reverse == 0 evaluates column OP d; otherwise
// the operands are swapped for the non-commutative ops (MINUS, DIV).
// Returns a freshly malloc'ed 2-element array owned by the caller.
float_type* temp = (float_type*)malloc(2*float_size);
for (int k = 0; k < 2; k++) {
if (op_type.compare("MUL") == 0)
temp[k] = column1[k] * d;
else if (op_type.compare("ADD") == 0)
temp[k] = column1[k] + d;
else if (op_type.compare("MINUS") == 0)
temp[k] = (reverse == 0) ? (column1[k] - d) : (d - column1[k]);
else // DIV
temp[k] = (reverse == 0) ? (column1[k] / d) : (d / column1[k]);
}
return temp;
}
// Computes the decimal precision (count of implied decimal digits) of the
// result of a fixed-point arithmetic op whose operands carry precisions
// p1 and p2. On return, p1 and p2 are rewritten in place as the power-of-10
// exponents by which the caller must scale each operand before applying the
// op (0 means no scaling needed).
//   ADD/MINUS : result precision = max(p1, p2); the lower-precision operand
//               is scaled up to match.
//   MUL       : result precision = p1 + p2; no operand scaling.
//   DIV       : numerator is widened by 4 extra digits (plus the precision
//               gap) to retain fractional accuracy.
unsigned int precision_func(unsigned int& p1, unsigned int& p2, string op) {
if (op.compare("DIV") != 0 ) {
unsigned int res;
if (op.compare("MUL") != 0 ) {
// ADD or MINUS: align both operands to the larger precision.
if(p1 > p2) {
res = p1;
p2 = p1-p2;
p1 = 0;
}
else {
// Bug fix: previously returned p1 here, i.e. the *smaller* original
// precision; the result of an add/subtract carries max(p1, p2).
res = p2;
p1 = p2-p1;
p2 = 0;
};
return res;
}
else {
// MUL: precisions add; neither operand needs pre-scaling.
res = p1+p2;
p1 = 0;
p2 = 0;
return res;
};
}
else {
// DIV: scale the numerator up by the precision gap plus 4 guard digits.
if(p1 == p2) {
p1 = p1+4;
p2 = 0;
return p1;
}
else {
if(p1 > p2) {
p1 = p1 + (p1-p2) + 4;
p2 = 0;
return p1;
}
else {
p2 = p2 + (p2-p1) + 4;
p1 = 0;
return p2;
}
}
};
}
//CudaSet a contains two records - with all minimum and maximum values of the segment
//We need to determine if this segment needs to be processed
//The check takes place in host's memory
// Evaluates a filter expression (supplied in RPN via the op_* queues) against
// the per-segment min/max zone-map metadata of CudaSet 'a', entirely on the
// host. Returns one of:
//   'A' - every row of the segment satisfies the filter,
//   'N' - no row can satisfy it (the segment may be skipped),
//   'R' - undecidable from min/max alone; the segment must be Read/filtered.
// Each "column" here is a 2-element host array holding [min, max] for the
// segment; host_op/host_compare propagate ops over those pairs.
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a, unsigned int segment)
{
// RPN evaluation stacks: token kinds, column names, interval vectors
// (int and float), scalar operands, boolean ('A'/'N'/'R') results, and
// fixed-point decimal precisions.
stack<string> exe_type;
stack<string> exe_value;
stack<int_type*> exe_vectors;
stack<float_type*> exe_vectors_f;
stack<int_type> exe_nums;
stack<char> bool_vectors;
stack<float_type> exe_nums_f;
stack<unsigned int> exe_precision;
string s1, s2, s1_val, s2_val;
int_type n1, n2, res;
float_type n1_f, n2_f, res_f;
// Uncompressed data carries no zone-map header, so the segment must be read.
if(a->not_compressed)
return 'R';
//first we need to set all host arrays [0] and [1] of t to min and max values of appropriate files
set<string> uniques;
queue<string> fields(op_value);
CudaSet *t;
FILE* f;
unsigned int cnt;
string f1;
// Load the segment's [min, max] for every distinct column referenced by the
// expression from the column file header on disk.
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered)
t = varNames[a->source_name];
else
t = a;
// copy t min and max values to a only if int, decimal or float
if(t->type[fields.front()] <= 1) {
f1 = t->load_file_name + "." + fields.front() + "." + to_string(segment);
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
fread((char *)&cnt, 4, 1, f);
if (t->type[fields.front()] == 0) {
// Integer column: header layout is count, min (8B), max (8B),
// then 8+cnt bytes to skip, then the segment record count.
a->h_columns_int[fields.front()].resize(2);
fread((char *)&a->h_columns_int[fields.front()][0], 8, 1, f);
fread((char *)&a->h_columns_int[fields.front()][1], 8, 1, f);
fseek(f, 8+cnt, SEEK_CUR);
fread((char *)&a->mRecCount, 4, 1, f);
//cout << endl << "ZONE " << a->mRecCount << endl;
fread((char *)&cnt, 4, 1, f);
//cout << "file " << f1 << " " << segment << " " << a->h_columns_int[fields.front()][0] << ":" << a->h_columns_int[fields.front()][1] << endl;
}
else {
// Float column: min/max are stored as scaled 64-bit ints
// (2 implied decimals), hence the /100.0 conversion.
long long int t;
a->h_columns_float[fields.front()].resize(2);
fread((char *)&t, 8, 1, f);
a->h_columns_float[fields.front()][0] = (float_type)t/100.0;
fread((char *)&t, 8, 1, f);
a->h_columns_float[fields.front()][1] = (float_type)t/100.0;
//cout << "file " << f1 << " " << segment << " " << a->h_columns_float[a->type_index[colIndex]][0] << ":" << a->h_columns_float[a->type_index[colIndex]][1] << endl;
};
fclose(f);
};
};
uniques.insert(fields.front());
fields.pop();
};
// Evaluate the RPN expression one token at a time.
for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
string ss = op_type.front();
if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("FLOAT") == 0
|| ss.compare("STRING") == 0) {
// Operand token: push it (and its payload) onto the matching stack.
exe_type.push(ss);
if (ss.compare("NUMBER") == 0) {
exe_nums.push(op_nums.front());
op_nums.pop();
exe_precision.push(op_nums_precision.front());
op_nums_precision.pop();
}
else if (ss.compare("NAME") == 0) {
if(var_exists(a, op_value.front())) {
exe_value.push(op_value.front());
op_value.pop();
}
else {
process_error(1, "Couldn't find column " + op_value.front());
//cout << "Couldn't find column " << op_value.front() << endl;
//exit(0);
};
}
else if (ss.compare("STRING") == 0) {
exe_value.push(op_value.front());
op_value.pop();
}
if (ss.compare("FLOAT") == 0) {
exe_nums_f.push(op_nums_f.front());
op_nums_f.pop();
}
}
else {
if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
// Arithmetic operator: pop two operands, dispatch on their kinds.
// get 2 values from the stack
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
// Align fixed-point precisions before the scalar op.
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1)
n1 = n1*(unsigned int)pow(10,p1);
if(p2)
n2 = n2*(unsigned int)pow(10,p2);
if (ss.compare("ADD") == 0 )
res = n1+n2;
else if (ss.compare("MUL") == 0 )
res = n1*n2;
else if (ss.compare("DIV") == 0 )
res = n1/n2;
else
res = n1-n2;
exe_type.push("NUMBER");
exe_nums.push(res);
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
if (ss.compare("ADD") == 0 )
res_f = n1_f+n2_f;
else if (ss.compare("MUL") == 0 )
res_f = n1_f*n2_f;
else if (ss.compare("DIV") == 0 )
res_f = n1_f/n2_f;
else
res_f = n1_f-n2_f;
exe_type.push("FLOAT");
exe_nums_f.push(res_f);
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR F");
if (a->type[s1_val] == 1) {
float_type* t = a->get_host_float_by_name(s1_val);
exe_vectors_f.push(host_op(t,n1_f,ss,1));
}
else {
int_type* t = a->get_host_int_by_name(s1_val);
exe_vectors_f.push(host_op(t,n1_f,ss,1));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR F");
if (a->type[s2_val] == 1) {
float_type* t = a->get_host_float_by_name(s2_val);
exe_vectors_f.push(host_op(t,n1_f,ss,0));
}
else {
int_type* t = a->get_host_int_by_name(s2_val);
exe_vectors_f.push(host_op(t,n1_f,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1 = exe_nums.top();
exe_nums.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = a->decimal_zeroes[s1_val];
if (a->type[s1_val] == 1) {
float_type* t = a->get_host_float_by_name(s1_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,(float_type)n1,ss,1));
}
else {
int_type* t = a->get_host_int_by_name(s1_val);
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,p1);
t[1] = t[1]*(unsigned int)pow(10,p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,p2);
};
exe_type.push("VECTOR");
exe_vectors.push(host_op(t,n1,ss,1));
};
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = a->decimal_zeroes[s2_val];
if (a->type[s2_val] == 1) {
float_type* t = a->get_host_float_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,(float_type)n1,ss,0));
}
else {
int_type* t = a->get_host_int_by_name(s2_val);
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,p1);
t[1] = t[1]*(unsigned int)pow(10,p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,p2);
};
exe_type.push("VECTOR");
exe_vectors.push(host_op(t,n1,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
// Column-vs-column arithmetic cannot be bounded by min/max pairs.
return 'R';
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
s2_val = exe_value.top();
exe_value.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = a->decimal_zeroes[s2_val];
if (a->type[s2_val] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto pres = precision_func(p1, p2,ss);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,p1);
t[1] = t[1]*(unsigned int)pow(10,p1);
};
if(p2) {
s3[0] = s3[0]*(unsigned int)pow(10,p2);
s3[1] = s3[1]*(unsigned int)pow(10,p2);
};
exe_vectors.push(host_op(t,s3,ss,0));
// NOTE(review): s3 was allocated with malloc() by host_op, but is
// released with hipFree here (and in the analogous branches below);
// presumably this should be free() — TODO confirm against the
// original CUDA source.
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,s3,ss,0));
hipFree(s3);
}
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,t, ss,0));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,s3,ss,0));
hipFree(s3);
}
};
}
else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = a->decimal_zeroes[s1_val];
if (a->type[s1_val] == 0) {
int_type* t = a->get_host_int_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,p1);
t[1] = t[1]*(unsigned int)pow(10,p1);
};
if(p2) {
s3[0] = s3[0]*(unsigned int)pow(10,p2);
s3[1] = s3[1]*(unsigned int)pow(10,p2);
};
exe_vectors.push(host_op(t,s3,ss,1));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,s3,ss,1));
hipFree(s3);
}
}
else {
float_type* t = a->get_host_float_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,t,ss,1));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,s3,ss,1));
hipFree(s3);
}
};
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1) {
s3[0] = s3[0]*(unsigned int)pow(10,p1);
s3[1] = s3[1]*(unsigned int)pow(10,p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,p2);
};
exe_vectors.push(host_op(s3,n1, ss,1));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,(float_type)n1, ss,1));
hipFree(s3);
}
}
// NOTE(review): the next condition reads 's2.compare("VECTOR")' without
// '== 0' — std::string::compare returns 0 on equality, so as written it
// is true whenever s2 != "VECTOR". Likely a typo for
// 's2.compare("VECTOR") == 0'; verify against the original source.
else if (s1.compare("NUMBER") == 0 &&( s2.compare("VECTOR") || s2.compare("VECTOR F") == 0)) {
n1 = exe_nums.top();
exe_nums.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1) {
s3[0] = s3[0]*(unsigned int)pow(10,p1);
s3[1] = s3[1]*(unsigned int)pow(10,p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,p2);
};
exe_vectors.push(host_op(s3,n1, ss,0));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,(float_type)n1, ss,0));
hipFree(s3);
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1_f, ss,1));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1_f, ss,1));
hipFree(s3);
}
}
else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) {
// NOTE(review): this branch pops exe_nums (the integer stack) even
// though s1 is FLOAT; presumably the scalar was pushed as a NUMBER
// upstream — verify.
n1_f = (float_type)exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1_f, ss,0));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1_f, ss,0));
hipFree(s3);
}
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s4 = exe_vectors.top();
exe_vectors.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1) {
s3[0] = s3[0]*(unsigned int)pow(10,p1);
s3[1] = s3[1]*(unsigned int)pow(10,p1);
};
if(p2) {
s4[0] = s4[0]*(unsigned int)pow(10,p2);
s4[1] = s4[1]*(unsigned int)pow(10,p2);
};
exe_type.push("VECTOR");
exe_vectors.push(host_op(s3, s4,ss,1));
hipFree(s3);
hipFree(s4);
}
else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3, s4,ss,1));
hipFree(s3);
hipFree(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3, s4,ss,0));
hipFree(s3);
hipFree(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3, s4,ss,1));
hipFree(s3);
hipFree(s4);
}
}
else if (ss.compare("CMP") == 0) {
// Comparison operator: produces an 'A'/'N'/'R' verdict from the
// operands' min/max intervals. cmp_type encodes <, >, <=, >=, =, !=.
int_type cmp_type = op_nums.front();
op_nums.pop();
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = ::max(p1, p2);
exe_precision.push(pres);
exe_type.push("VECTOR");
if(p1)
n1 = n1*(unsigned int)pow(10,pres-p1);
if(p2)
n2 = n2*(unsigned int)pow(10,pres-p2);
bool_vectors.push(host_compare(n1,n2,cmp_type));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(n1_f,n2_f,cmp_type));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NUMBER") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(n1_f,float_type(n2),cmp_type));
}
else if (s1.compare("NUMBER") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(n1_f,float_type(n2),cmp_type));
}
else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) {
exe_type.push("VECTOR");
bool_vectors.push('R'); // later I plan to change implementation of char type so I will leave indexing of char off for now
}
else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) {
exe_type.push("VECTOR");
bool_vectors.push('R');
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s1_val = exe_value.top();
exe_value.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
// NOTE(review): s2_val is not assigned in this branch (the value was
// popped into s1_val above), so decimal_zeroes is looked up with a
// stale key; likely should be a->decimal_zeroes[s1_val] — verify.
auto p1 = a->decimal_zeroes[s2_val];
if (a->type[s1_val] == 0) {
int_type* t = a->get_host_int_by_name(s1_val);
exe_type.push("VECTOR");
auto pres = ::max(p1, p2);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,pres-p1);
t[1] = t[1]*(unsigned int)pow(10,pres-p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,pres-p2);
};
bool_vectors.push(host_compare(t,n1,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s1_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,(float_type)n1,cmp_type));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
// Operand order is swapped relative to the comparison, so mirror the
// operator (e.g. '<' becomes '>').
cmp_type = reverse_op(cmp_type);
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = a->decimal_zeroes[s2_val];
if (a->type[s2_val] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
exe_type.push("VECTOR");
auto pres = ::max(p1, p2);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,pres-p1);
t[1] = t[1]*(unsigned int)pow(10,pres-p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,pres-p2);
};
bool_vectors.push(host_compare(t,n1,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,(float_type)n1,cmp_type));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s1_val = exe_value.top();
exe_value.pop();
if (a->type[s1_val] == 0) {
int_type* t = a->get_host_int_by_name(s1_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,n1_f,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s1_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,n1_f,cmp_type));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
cmp_type = reverse_op(cmp_type);
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[s2_val] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,(int_type)n1_f,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,n1_f,cmp_type));
};
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("NUMBER") == 0) {
cmp_type = reverse_op(cmp_type);
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
n1 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,(float_type)n1,cmp_type));
hipFree(s3);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("NUMBER") == 0) {
cmp_type = reverse_op(cmp_type);
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
n1 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = ::max(p1, p2);
exe_precision.push(pres);
if(p1) {
s3[0] = s3[0]*(unsigned int)pow(10,pres-p1);
s3[1] = s3[1]*(unsigned int)pow(10,pres-p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,pres-p2);
};
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1,cmp_type));
hipFree(s3);
}
else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
n1 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,(float_type)n1,cmp_type));
hipFree(s3);
}
else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
n1 = exe_nums.top();
exe_nums.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto pres = ::max(p1, p2);
exe_precision.push(pres);
if(p1) {
s3[0] = s3[0]*(unsigned int)pow(10,pres-p1);
s3[1] = s3[1]*(unsigned int)pow(10,pres-p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,pres-p2);
};
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1,cmp_type));
hipFree(s3);
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("FLOAT") == 0) {
cmp_type = reverse_op(cmp_type);
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1_f,cmp_type));
hipFree(s3);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("FLOAT") == 0) {
cmp_type = reverse_op(cmp_type);
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,(int_type)n1_f,cmp_type));
hipFree(s3);
}
else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1_f,cmp_type));
hipFree(s3);
}
else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,(int_type)n1_f,cmp_type));
hipFree(s3);
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("NAME") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR");
if (a->type[s2_val] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
bool_vectors.push(host_compare(s3,t,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
};
hipFree(s3);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("NAME") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR");
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = a->decimal_zeroes[s2_val];
if (a->type[s2_val] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
auto pres = ::max(p1, p2);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,pres-p1);
t[1] = t[1]*(unsigned int)pow(10,pres-p1);
};
if(p2) {
s3[0] = s3[0]*(unsigned int)pow(10,pres-p2);
s3[1] = s3[1]*(unsigned int)pow(10,pres-p2);
};
bool_vectors.push(host_compare(t,s3,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
};
hipFree(s3);
}
else if (s1.compare("NAME") == 0 && s2.compare("VECTOR F") == 0) {
cmp_type = reverse_op(cmp_type);
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR");
if (a->type[s2_val] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
bool_vectors.push(host_compare(s3,t,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
};
hipFree(s3);
}
else if (s1.compare("NAME") == 0 && s2.compare("VECTOR") == 0) {
cmp_type = reverse_op(cmp_type);
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR");
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = a->decimal_zeroes[s2_val];
if (a->type[s2_val] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
auto pres = ::max(p1, p2);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,pres-p1);
t[1] = t[1]*(unsigned int)pow(10,pres-p1);
};
if(p2) {
s3[0] = s3[0]*(unsigned int)pow(10,pres-p2);
s3[1] = s3[1]*(unsigned int)pow(10,pres-p2);
};
bool_vectors.push(host_compare(t,s3,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
};
hipFree(s3);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s2 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = ::max(p2, p1);
exe_precision.push(pres);
if(p1) {
s3[0] = s3[0]*(unsigned int)pow(10,pres-p1);
s3[1] = s3[1]*(unsigned int)pow(10,pres-p1);
};
if(p2) {
s2[0] = s2[0]*(unsigned int)pow(10,pres-p2);
s2[1] = s2[1]*(unsigned int)pow(10,pres-p2);
};
bool_vectors.push(host_compare(s2,s3,cmp_type));
hipFree(s3);
hipFree(s2);
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s2 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s2,s3,cmp_type));
hipFree(s3);
hipFree(s2);
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
cmp_type = reverse_op(cmp_type);
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
int_type* s2 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,s2,cmp_type));
hipFree(s3);
hipFree(s2);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
int_type* s2 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,s2,cmp_type));
hipFree(s3);
hipFree(s2);
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
// Column-vs-column comparisons cannot be decided from min/max alone.
return 'R';
}
}
else if (ss.compare("AND") == 0) {
// Three-valued conjunction over the 'A'/'N'/'R' verdicts.
char s3 = bool_vectors.top();
bool_vectors.pop();
char s2 = bool_vectors.top();
bool_vectors.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_logical_and(s2,s3));
}
else if (ss.compare("OR") == 0) {
// Three-valued disjunction over the 'A'/'N'/'R' verdicts.
char s3 = bool_vectors.top();
bool_vectors.pop();
char s2 = bool_vectors.top();
bool_vectors.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_logical_or(s2,s3));
}
else {
if(ss.compare("JOIN") == 0)
process_error(2, "operation = is not valid");
//cout << "operation = is not valid" << endl;
else
process_error(2, "operation " + string(ss)+ " is not valid");
//cout << "operation " << ss << " is not valid" << endl;
exit(0); // never gets here
}
};
};
// Final verdict for this segment sits on top of the boolean stack.
return bool_vectors.top();
}
| d1198ad2f93b19ce7ab784cb80e1658b2419d599.cu | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "zone_map.h"
void process_error(int severity, string err); // this should probably live in a utils header file
bool fh_equal_to(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
bool fh_less(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
bool fh_greater(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
bool fh_greater_equal_to(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
bool fh_less_equal_to(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
char host_logical_and(char column1, char column2)
{
//cout << "AND " << column1 << " " << column2 << endl;
if (column1 == 'A' && column2 == 'A')
return 'A';
else if (column1 == 'N' || column2 == 'N') {
return 'N';
}
else
return 'R';
}
char host_logical_or(char column1, char column2)
{
//cout << "OR " << column1 << " " << column2 << endl;
if (column1 == 'A' && column2 == 'A')
return 'A';
else if (column1 == 'N' && column2 == 'N')
return 'N';
else
return 'R';
}
char host_compare(int_type s, int_type d, int_type op_type)
{
char res = 'N';
if (op_type == 2 && d>s ) // >
res = 'A';
else if (op_type == 1 && d<s) // <
res = 'A';
else if (op_type == 6 && d>=s) // >=
res = 'A';
else if (op_type == 5 && d<=s) // <=
res = 'A';
else if (op_type == 4 && d==s)// =
res = 'A';
else // !=
if(d!=s) res = 'A';
return res;
}
char host_compare(float_type s, float_type d, int_type op_type)
{
char res = 'N';
if (op_type == 2 && (d-s) > EPSILON) // >
res = 'A';
else if (op_type == 1 && (s-d) > EPSILON) // <
res = 'A';
else if (op_type == 6 && ((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) // >=
res = 'A';
else if (op_type == 5 && ((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) // <=
res = 'A';
else if (op_type == 4 && ((d-s) < EPSILON) && ((d-s) > -EPSILON))// =
res = 'A';
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 'A';
return res;
}
char host_compare(int_type* column1, int_type d, int_type op_type)
{
char res = 'R';
//cout << "CMP " << column1[0] << " " << column1[1] << " with " << d << endl;
if (op_type == 2) { // >
if (column1[1] <= d)
res = 'N';
else if (column1[0] > d)
res = 'A';
}
else if (op_type == 1) { // <
if (column1[0] >= d)
res = 'N';
else if (column1[1] < d)
res = 'A';
}
else if (op_type == 6) { // >=
if (column1[1] < d)
res = 'N';
else if (column1[0] >= d)
res = 'A';
}
else if (op_type == 5) { // <=
if (column1[0] > d)
res = 'N';
else if (column1[1] <= d)
res = 'A';
}
else if (op_type == 4 && column1[0] == d && column1[1] == d) { // =
res = 'A';
};
//cout << "res " << res << endl;
return res;
}
char host_compare(float_type* column1, float_type d, int_type op_type)
{
char res = 'R';
//cout << "CMP " << column1[0] << " " << column1[1] << " with " << d << endl;
if (op_type == 2) { // >
if(fh_less_equal_to(column1[1],d)) {
res = 'N';
}
else if(fh_greater(column1[0],d)) {
res = 'A';
};
}
else if (op_type == 1) { // <
if(fh_less(column1[1],d)) {
res = 'A';
}
else if(fh_greater_equal_to(column1[0],d)) {
res = 'N';
};
}
else if (op_type == 6) { // >=
if(fh_greater_equal_to(column1[0],d)) {
res = 'A';
}
else if(fh_less(column1[1],d)) {
res = 'N';
};
}
else if (op_type == 5) { // <=
if(fh_less_equal_to(column1[1],d)) {
res = 'A';
}
else if(fh_greater(column1[0],d)) {
res = 'N';
};
}
else if (op_type == 4 && fh_equal_to(column1[0],d) && fh_equal_to(column1[1],d)) // =
res = 'A';
//cout << "res " << res << endl;
return res;
}
char host_compare(int_type* column1, int_type* column2, int_type op_type)
{
char res = 'R';
if (op_type == 2) { // >
if(column1[0] > column2[1])
res = 'A';
else if(column1[1] <= column2[0])
res = 'N';
}
else if (op_type == 1) { // <
if(column1[1] < column2[0])
res = 'A';
else if(column1[0] >= column2[1])
res = 'N';
}
else if (op_type == 6) { // >=
if(column1[0] >= column2[1])
res = 'A';
else if(column1[1] < column2[0])
res = 'N';
}
else if (op_type == 5) { // <=
if(column1[1] <= column2[0])
res = 'A';
else if(column1[0] > column2[1])
res = 'N';
}
else if (op_type == 4 && column1[0] == column2[1] && column1[1] == column2[0]) // =
res = 'A';
return res;
}
char host_compare(float_type* column1, float_type* column2, int_type op_type)
{
char res = 'R';
if (op_type == 2) { // >
if(fh_greater(column1[0],column2[1]))
res = 'A';
else if(fh_less_equal_to(column1[1],column2[0]))
res = 'N';
}
else if (op_type == 1) { // <
if(fh_less(column1[1],column2[0]))
res = 'A';
else if(fh_greater_equal_to(column1[0],column2[1]))
res = 'N';
}
else if (op_type == 6) { // >=
if(fh_greater_equal_to(column1[1],column2[0]))
res = 'A';
else if(fh_less(column1[1],column2[0]))
res = 'N';
}
else if (op_type == 5) { // <=
if(fh_less_equal_to(column1[1],column2[0]))
res = 'A';
else if(fh_greater(column1[0],column2[1]))
res = 'N';
}
else if (op_type == 4 && fh_equal_to(column1[0], column2[1]) && fh_equal_to(column1[1],column2[0])) // =
res = 'A';
return res;
}
// Zone-map comparison of a float [min,max] range against an integer one.
// Returns 'A' (all rows qualify), 'N' (none can), or 'R' (segment must be read).
// FIX: the >= branch previously tested max1 >= min2 for the 'A' verdict; like
// the integer overload it must test min1 >= max2.
char host_compare(float_type* column1, int_type* column2, int_type op_type)
{
    char res = 'R';
    if (op_type == 2) { // >
        if(fh_greater(column1[0],(float_type)column2[1]))
            res = 'A';
        else if(fh_less_equal_to(column1[1],(float_type)column2[0]))
            res = 'N';
    }
    else if (op_type == 1) { // <
        if(fh_less(column1[1],(float_type)column2[0]))
            res = 'A';
        else if(fh_greater_equal_to(column1[0],(float_type)column2[1]))
            res = 'N';
    }
    else if (op_type == 6) { // >=
        if(fh_greater_equal_to(column1[0],(float_type)column2[1]))  // fixed: was column1[1] >= column2[0]
            res = 'A';
        else if(fh_less(column1[1],(float_type)column2[0]))
            res = 'N';
    }
    else if (op_type == 5) { // <=
        if(fh_less_equal_to(column1[1],(float_type)column2[0]))
            res = 'A';
        else if(fh_greater(column1[0],(float_type)column2[1]))
            res = 'N';
    }
    else if (op_type == 4 && fh_equal_to(column1[0],(float_type) column2[1]) && fh_equal_to(column1[1],(float_type)column2[0])) // =
        res = 'A';
    return res;
}
// Applies op_type element-wise to two 2-element (min/max) ranges, promoting
// the integer range to float. Caller owns the malloc'd result.
// NOTE(review): in this overload reverse == 0 computes column2 op column1 for
// MINUS/DIV (the opposite of the int/int overload) — presumably matched by
// the call sites; confirm before changing.
float_type* host_op(int_type* column1, float_type* column2, string op_type, int reverse)
{
    float_type* out = (float_type*)malloc(2*float_size);
    for (int i = 0; i < 2; ++i) {
        float_type lhs = (float_type)column1[i];
        float_type rhs = column2[i];
        if (op_type.compare("MUL") == 0)
            out[i] = lhs * rhs;
        else if (op_type.compare("ADD") == 0)
            out[i] = lhs + rhs;
        else if (op_type.compare("MINUS") == 0)
            out[i] = (reverse == 0) ? rhs - lhs : lhs - rhs;
        else // DIV
            out[i] = (reverse == 0) ? rhs / lhs : lhs / rhs;
    }
    return out;
}
// Applies op_type element-wise to two 2-element (min/max) integer ranges.
// reverse != 0 swaps the operand order for the non-commutative ops
// (MINUS/DIV). Caller owns the malloc'd result.
int_type* host_op(int_type* column1, int_type* column2, string op_type, int reverse)
{
    int_type* out = (int_type*)malloc(2*int_size);
    for (int i = 0; i < 2; ++i) {
        int_type lhs = column1[i];
        int_type rhs = column2[i];
        if (op_type.compare("MUL") == 0)
            out[i] = lhs * rhs;
        else if (op_type.compare("ADD") == 0)
            out[i] = lhs + rhs;
        else if (op_type.compare("MINUS") == 0)
            out[i] = (reverse == 0) ? lhs - rhs : rhs - lhs;
        else // DIV
            out[i] = (reverse == 0) ? lhs / rhs : rhs / lhs;
    }
    return out;
}
// Applies op_type element-wise to two 2-element (min/max) float ranges.
// reverse != 0 swaps the operand order for the non-commutative ops
// (MINUS/DIV). Caller owns the malloc'd result.
float_type* host_op(float_type* column1, float_type* column2, string op_type, int reverse)
{
    float_type* out = (float_type*)malloc(2*float_size);
    for (int i = 0; i < 2; ++i) {
        float_type lhs = column1[i];
        float_type rhs = column2[i];
        if (op_type.compare("MUL") == 0)
            out[i] = lhs * rhs;
        else if (op_type.compare("ADD") == 0)
            out[i] = lhs + rhs;
        else if (op_type.compare("MINUS") == 0)
            out[i] = (reverse == 0) ? lhs - rhs : rhs - lhs;
        else // DIV
            out[i] = (reverse == 0) ? lhs / rhs : rhs / lhs;
    }
    return out;
}
// Applies op_type between a 2-element (min/max) integer range and a scalar d.
// reverse != 0 swaps the operand order for the non-commutative ops
// (MINUS/DIV). Caller owns the malloc'd result.
int_type* host_op(int_type* column1, int_type d, string op_type, int reverse)
{
    int_type* out = (int_type*)malloc(2*int_size);
    for (int i = 0; i < 2; ++i) {
        int_type v = column1[i];
        if (op_type.compare("MUL") == 0)
            out[i] = v * d;
        else if (op_type.compare("ADD") == 0)
            out[i] = v + d;
        else if (op_type.compare("MINUS") == 0)
            out[i] = (reverse == 0) ? v - d : d - v;
        else // DIV
            out[i] = (reverse == 0) ? v / d : d / v;
    }
    return out;
}
// Applies op_type between a 2-element (min/max) integer range, promoted to
// float, and a float scalar d. reverse != 0 swaps the operand order for the
// non-commutative ops (MINUS/DIV). Caller owns the malloc'd result.
float_type* host_op(int_type* column1, float_type d, string op_type, int reverse)
{
    float_type* out = (float_type*)malloc(2*float_size);
    for (int i = 0; i < 2; ++i) {
        float_type v = (float_type)column1[i];
        if (op_type.compare("MUL") == 0)
            out[i] = v * d;
        else if (op_type.compare("ADD") == 0)
            out[i] = v + d;
        else if (op_type.compare("MINUS") == 0)
            out[i] = (reverse == 0) ? v - d : d - v;
        else // DIV
            out[i] = (reverse == 0) ? v / d : d / v;
    }
    return out;
}
// Applies op_type between a 2-element (min/max) float range and a scalar d.
// reverse != 0 swaps the operand order for the non-commutative ops
// (MINUS/DIV). Caller owns the malloc'd result.
float_type* host_op(float_type* column1, float_type d, string op_type,int reverse)
{
    float_type* out = (float_type*)malloc(2*float_size);
    for (int i = 0; i < 2; ++i) {
        float_type v = column1[i];
        if (op_type.compare("MUL") == 0)
            out[i] = v * d;
        else if (op_type.compare("ADD") == 0)
            out[i] = v + d;
        else if (op_type.compare("MINUS") == 0)
            out[i] = (reverse == 0) ? v - d : d - v;
        else // DIV
            out[i] = (reverse == 0) ? v / d : d / v;
    }
    return out;
}
// Computes the decimal precision (number of implied fractional digits) of the
// result of `op` applied to two fixed-point operands with precisions p1, p2.
// On return p1/p2 are overwritten with the power-of-ten exponents the CALLER
// must apply to scale each operand into the common precision (0 = no scaling).
// FIX: in the ADD/MINUS branch with p2 >= p1 the function returned p1 as the
// result precision; the aligned result has the LARGER precision p2 (symmetric
// with the p1 > p2 branch).
unsigned int precision_func(unsigned int& p1, unsigned int& p2, string op) {
    unsigned int res;
    if (op.compare("DIV") == 0) {
        // Division: widen the larger precision and keep 4 extra digits.
        if (p1 >= p2) {
            p1 = p1 + (p1 - p2) + 4;
            p2 = 0;
            return p1;
        }
        p2 = p2 + (p2 - p1) + 4;
        p1 = 0;
        return p2;
    }
    if (op.compare("MUL") == 0) {
        // Multiplication: precisions add; operands need no pre-scaling.
        res = p1 + p2;
        p1 = 0;
        p2 = 0;
        return res;
    }
    // ADD / MINUS: align both operands to the larger precision.
    if (p1 > p2) {
        res = p1;
        p2 = p1 - p2;   // scale factor for operand 2
        p1 = 0;
    }
    else {
        res = p2;       // fixed: was p1
        p1 = p2 - p1;   // scale factor for operand 1
        p2 = 0;
    }
    return res;
}
//CudaSet a contains two records - with all minimum and maximum values of the segment
//We need to determine if this segment needs to be processed
//The check takes place in host's memory
// Zone-map check: CudaSet `a` holds, per column, a two-element [min,max]
// record for the given segment. The postfix expression described by
// op_type/op_value/op_nums/op_nums_f/op_nums_precision is evaluated over
// those ranges to decide whether the whole segment can be skipped.
// Returns 'A' (every row qualifies), 'N' (no row qualifies) or 'R' (the
// segment must be read and filtered normally).
// FIXES: intermediate range vectors are allocated with malloc() inside
// host_op(), so they must be released with free(), not cudaFree();
// the NUMBER/(VECTOR|VECTOR F) predicate was missing an "== 0";
// the NUMBER/NAME compare branch read the stale s2_val for decimal_zeroes.
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a, unsigned int segment)
{
    stack<string> exe_type;
    stack<string> exe_value;
    stack<int_type*> exe_vectors;
    stack<float_type*> exe_vectors_f;
    stack<int_type> exe_nums;
    stack<char> bool_vectors;
    stack<float_type> exe_nums_f;
    stack<unsigned int> exe_precision;
    string s1, s2, s1_val, s2_val;
    int_type n1, n2, res;
    float_type n1_f, n2_f, res_f;
    if(a->not_compressed)
        return 'R';
    // First we need to set all host arrays [0] and [1] to the min and max
    // values of the appropriate segment files.
    set<string> uniques;
    queue<string> fields(op_value);
    CudaSet *t;
    FILE* f;
    unsigned int cnt;
    string f1;
    while(!fields.empty()) {
        if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
            if(a->filtered)
                t = varNames[a->source_name];
            else
                t = a;
            // copy t min and max values to a only if int, decimal or float
            if(t->type[fields.front()] <= 1) {
                f1 = t->load_file_name + "." + fields.front() + "." + to_string(segment);
                f = fopen (f1.c_str() , "rb" );
                if(!f) {
                    cout << "Error opening " << f1 << " file " << endl;
                    exit(0);
                };
                fread((char *)&cnt, 4, 1, f);
                if (t->type[fields.front()] == 0) {
                    a->h_columns_int[fields.front()].resize(2);
                    fread((char *)&a->h_columns_int[fields.front()][0], 8, 1, f);
                    fread((char *)&a->h_columns_int[fields.front()][1], 8, 1, f);
                    fseek(f, 8+cnt, SEEK_CUR);
                    fread((char *)&a->mRecCount, 4, 1, f);
                    fread((char *)&cnt, 4, 1, f);
                }
                else {
                    // decimal columns are stored as int64 scaled by 100
                    long long int t;
                    a->h_columns_float[fields.front()].resize(2);
                    fread((char *)&t, 8, 1, f);
                    a->h_columns_float[fields.front()][0] = (float_type)t/100.0;
                    fread((char *)&t, 8, 1, f);
                    a->h_columns_float[fields.front()][1] = (float_type)t/100.0;
                };
                fclose(f);
            };
        };
        uniques.insert(fields.front());
        fields.pop();
    };
    // Evaluate the postfix expression over the loaded [min,max] ranges.
    for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
        string ss = op_type.front();
        if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("FLOAT") == 0
                || ss.compare("STRING") == 0) {
            exe_type.push(ss);
            if (ss.compare("NUMBER") == 0) {
                exe_nums.push(op_nums.front());
                op_nums.pop();
                exe_precision.push(op_nums_precision.front());
                op_nums_precision.pop();
            }
            else if (ss.compare("NAME") == 0) {
                if(var_exists(a, op_value.front())) {
                    exe_value.push(op_value.front());
                    op_value.pop();
                }
                else {
                    process_error(1, "Couldn't find column " + op_value.front());
                };
            }
            else if (ss.compare("STRING") == 0) {
                exe_value.push(op_value.front());
                op_value.pop();
            }
            if (ss.compare("FLOAT") == 0) {
                exe_nums_f.push(op_nums_f.front());
                op_nums_f.pop();
            }
        }
        else {
            if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
                // get 2 values from the stack
                s1 = exe_type.top();
                exe_type.pop();
                s2 = exe_type.top();
                exe_type.pop();
                if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto pres = precision_func(p1, p2, ss);
                    exe_precision.push(pres);
                    if(p1)
                        n1 = n1*(unsigned int)pow(10,p1);
                    if(p2)
                        n2 = n2*(unsigned int)pow(10,p2);
                    if (ss.compare("ADD") == 0 )
                        res = n1+n2;
                    else if (ss.compare("MUL") == 0 )
                        res = n1*n2;
                    else if (ss.compare("DIV") == 0 )
                        res = n1/n2;
                    else
                        res = n1-n2;
                    exe_type.push("NUMBER");
                    exe_nums.push(res);
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    n2_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    if (ss.compare("ADD") == 0 )
                        res_f = n1_f+n2_f;
                    else if (ss.compare("MUL") == 0 )
                        res_f = n1_f*n2_f;
                    else if (ss.compare("DIV") == 0 )
                        res_f = n1_f/n2_f;
                    else
                        res_f = n1_f-n2_f;
                    exe_type.push("FLOAT");
                    exe_nums_f.push(res_f);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR F");
                    if (a->type[s1_val] == 1) {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        exe_vectors_f.push(host_op(t,n1_f,ss,1));
                    }
                    else {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        exe_vectors_f.push(host_op(t,n1_f,ss,1));
                    };
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR F");
                    if (a->type[s2_val] == 1) {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        exe_vectors_f.push(host_op(t,n1_f,ss,0));
                    }
                    else {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        exe_vectors_f.push(host_op(t,n1_f,ss,0));
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = a->decimal_zeroes[s1_val];
                    if (a->type[s1_val] == 1) {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(t,(float_type)n1,ss,1));
                    }
                    else {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        auto pres = precision_func(p1, p2, ss);
                        exe_precision.push(pres);
                        if(p1) {
                            t[0] = t[0]*(unsigned int)pow(10,p1);
                            t[1] = t[1]*(unsigned int)pow(10,p1);
                        };
                        if(p2) {
                            n1 = n1*(unsigned int)pow(10,p2);
                        };
                        exe_type.push("VECTOR");
                        exe_vectors.push(host_op(t,n1,ss,1));
                    };
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = a->decimal_zeroes[s2_val];
                    if (a->type[s2_val] == 1) {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(t,(float_type)n1,ss,0));
                    }
                    else {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        auto pres = precision_func(p1, p2, ss);
                        exe_precision.push(pres);
                        if(p1) {
                            t[0] = t[0]*(unsigned int)pow(10,p1);
                            t[1] = t[1]*(unsigned int)pow(10,p1);
                        };
                        if(p2) {
                            n1 = n1*(unsigned int)pow(10,p2);
                        };
                        exe_type.push("VECTOR");
                        exe_vectors.push(host_op(t,n1,ss,0));
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
                    // column-vs-column arithmetic can't be decided from min/max alone
                    return 'R';
                }
                else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
                    s2_val = exe_value.top();
                    exe_value.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = a->decimal_zeroes[s2_val];
                    if (a->type[s2_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        if (s1.compare("VECTOR") == 0 ) {
                            int_type* s3 = exe_vectors.top();
                            exe_vectors.pop();
                            exe_type.push("VECTOR");
                            auto pres = precision_func(p1, p2,ss);
                            exe_precision.push(pres);
                            if(p1) {
                                t[0] = t[0]*(unsigned int)pow(10,p1);
                                t[1] = t[1]*(unsigned int)pow(10,p1);
                            };
                            if(p2) {
                                s3[0] = s3[0]*(unsigned int)pow(10,p2);
                                s3[1] = s3[1]*(unsigned int)pow(10,p2);
                            };
                            exe_vectors.push(host_op(t,s3,ss,0));
                            free(s3);   // fixed: host_op allocates with malloc, was cudaFree
                        }
                        else {
                            float_type* s3 = exe_vectors_f.top();
                            exe_vectors_f.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t,s3,ss,0));
                            free(s3);
                        }
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        if (s1.compare("VECTOR") == 0 ) {
                            int_type* s3 = exe_vectors.top();
                            exe_vectors.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(s3,t, ss,0));
                            free(s3);
                        }
                        else {
                            float_type* s3 = exe_vectors_f.top();
                            exe_vectors_f.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t,s3,ss,0));
                            free(s3);
                        }
                    };
                }
                else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = a->decimal_zeroes[s1_val];
                    if (a->type[s1_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        if (s2.compare("VECTOR") == 0 ) {
                            int_type* s3 = exe_vectors.top();
                            exe_vectors.pop();
                            exe_type.push("VECTOR");
                            auto pres = precision_func(p1, p2, ss);
                            exe_precision.push(pres);
                            if(p1) {
                                t[0] = t[0]*(unsigned int)pow(10,p1);
                                t[1] = t[1]*(unsigned int)pow(10,p1);
                            };
                            if(p2) {
                                s3[0] = s3[0]*(unsigned int)pow(10,p2);
                                s3[1] = s3[1]*(unsigned int)pow(10,p2);
                            };
                            exe_vectors.push(host_op(t,s3,ss,1));
                            free(s3);
                        }
                        else {
                            float_type* s3 = exe_vectors_f.top();
                            exe_vectors_f.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t,s3,ss,1));
                            free(s3);
                        }
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        if (s2.compare("VECTOR") == 0 ) {
                            int_type* s3 = exe_vectors.top();
                            exe_vectors.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(s3,t,ss,1));
                            free(s3);
                        }
                        else {
                            float_type* s3 = exe_vectors_f.top();
                            exe_vectors_f.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t,s3,ss,1));
                            free(s3);
                        }
                    };
                }
                else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    if (s1.compare("VECTOR") == 0 ) {
                        int_type* s3 = exe_vectors.top();
                        exe_vectors.pop();
                        exe_type.push("VECTOR");
                        auto pres = precision_func(p1, p2, ss);
                        exe_precision.push(pres);
                        if(p1) {
                            s3[0] = s3[0]*(unsigned int)pow(10,p1);
                            s3[1] = s3[1]*(unsigned int)pow(10,p1);
                        };
                        if(p2) {
                            n1 = n1*(unsigned int)pow(10,p2);
                        };
                        exe_vectors.push(host_op(s3,n1, ss,1));
                        free(s3);
                    }
                    else {
                        float_type* s3 = exe_vectors_f.top();
                        exe_vectors_f.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,(float_type)n1, ss,1));
                        free(s3);
                    }
                }
                else if (s1.compare("NUMBER") == 0 && (s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0)) { // fixed: first test was missing "== 0"
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    if (s2.compare("VECTOR") == 0 ) {
                        int_type* s3 = exe_vectors.top();
                        exe_vectors.pop();
                        exe_type.push("VECTOR");
                        auto pres = precision_func(p1, p2, ss);
                        exe_precision.push(pres);
                        if(p1) {
                            s3[0] = s3[0]*(unsigned int)pow(10,p1);
                            s3[1] = s3[1]*(unsigned int)pow(10,p1);
                        };
                        if(p2) {
                            n1 = n1*(unsigned int)pow(10,p2);
                        };
                        exe_vectors.push(host_op(s3,n1, ss,0));
                        free(s3);
                    }
                    else {
                        float_type* s3 = exe_vectors_f.top();
                        exe_vectors_f.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,(float_type)n1, ss,0));
                        free(s3);
                    }
                }
                else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    if (s1.compare("VECTOR") == 0 ) {
                        int_type* s3 = exe_vectors.top();
                        exe_vectors.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1_f, ss,1));
                        free(s3);
                    }
                    else {
                        float_type* s3 = exe_vectors_f.top();
                        exe_vectors_f.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1_f, ss,1));
                        free(s3);
                    }
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) {
                    n1_f = (float_type)exe_nums.top();
                    exe_nums.pop();
                    if (s2.compare("VECTOR") == 0 ) {
                        int_type* s3 = exe_vectors.top();
                        exe_vectors.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1_f, ss,0));
                        free(s3);
                    }
                    else {
                        float_type* s3 = exe_vectors_f.top();
                        exe_vectors_f.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1_f, ss,0));
                        free(s3);
                    }
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    int_type* s4 = exe_vectors.top();
                    exe_vectors.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto pres = precision_func(p1, p2, ss);
                    exe_precision.push(pres);
                    if(p1) {
                        s3[0] = s3[0]*(unsigned int)pow(10,p1);
                        s3[1] = s3[1]*(unsigned int)pow(10,p1);
                    };
                    if(p2) {
                        s4[0] = s4[0]*(unsigned int)pow(10,p2);
                        s4[1] = s4[1]*(unsigned int)pow(10,p2);
                    };
                    exe_type.push("VECTOR");
                    exe_vectors.push(host_op(s3, s4,ss,1));
                    free(s3);
                    free(s4);
                }
                else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    float_type* s4 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    exe_type.push("VECTOR F");
                    exe_vectors_f.push(host_op(s3, s4,ss,1));
                    free(s3);
                    free(s4);
                }
                else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    float_type* s4 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    exe_type.push("VECTOR F");
                    exe_vectors_f.push(host_op(s3, s4,ss,0));
                    free(s3);
                    free(s4);
                }
                else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    float_type* s4 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    exe_type.push("VECTOR F");
                    exe_vectors_f.push(host_op(s3, s4,ss,1));
                    free(s3);
                    free(s4);
                }
            }
            else if (ss.compare("CMP") == 0) {
                int_type cmp_type = op_nums.front();
                op_nums.pop();
                s1 = exe_type.top();
                exe_type.pop();
                s2 = exe_type.top();
                exe_type.pop();
                if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto pres = std::max(p1, p2);
                    exe_precision.push(pres);
                    exe_type.push("VECTOR");
                    if(p1)
                        n1 = n1*(unsigned int)pow(10,pres-p1);
                    if(p2)
                        n2 = n2*(unsigned int)pow(10,pres-p2);
                    bool_vectors.push(host_compare(n1,n2,cmp_type));
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    n2_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(n1_f,n2_f,cmp_type));
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("NUMBER") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(n1_f,float_type(n2),cmp_type));
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("FLOAT") == 0) {
                    // NOTE(review): identical to the branch above; one of the two
                    // presumably needs reverse_op(cmp_type) — confirm against callers.
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(n1_f,float_type(n2),cmp_type));
                }
                else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) {
                    exe_type.push("VECTOR");
                    bool_vectors.push('R'); // char columns are not zone-map indexed yet
                }
                else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) {
                    exe_type.push("VECTOR");
                    bool_vectors.push('R');
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    s1_val = exe_value.top();
                    exe_value.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = a->decimal_zeroes[s1_val];   // fixed: was s2_val (stale from an earlier branch)
                    if (a->type[s1_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        exe_type.push("VECTOR");
                        auto pres = std::max(p1, p2);
                        exe_precision.push(pres);
                        if(p1) {
                            t[0] = t[0]*(unsigned int)pow(10,pres-p1);
                            t[1] = t[1]*(unsigned int)pow(10,pres-p1);
                        };
                        if(p2) {
                            n1 = n1*(unsigned int)pow(10,pres-p2);
                        };
                        bool_vectors.push(host_compare(t,n1,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,(float_type)n1,cmp_type));
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = a->decimal_zeroes[s2_val];
                    if (a->type[s2_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        exe_type.push("VECTOR");
                        auto pres = std::max(p1, p2);
                        exe_precision.push(pres);
                        if(p1) {
                            t[0] = t[0]*(unsigned int)pow(10,pres-p1);
                            t[1] = t[1]*(unsigned int)pow(10,pres-p1);
                        };
                        if(p2) {
                            n1 = n1*(unsigned int)pow(10,pres-p2);
                        };
                        bool_vectors.push(host_compare(t,n1,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,(float_type)n1,cmp_type));
                    };
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    s1_val = exe_value.top();
                    exe_value.pop();
                    if (a->type[s1_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,n1_f,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,n1_f,cmp_type));
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    if (a->type[s2_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        exe_type.push("VECTOR");
                        // NOTE(review): casting the float constant to int_type drops the
                        // fraction; the symmetric branch above compares against n1_f — confirm.
                        bool_vectors.push(host_compare(t,(int_type)n1_f,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,n1_f,cmp_type));
                    };
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("NUMBER") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,(float_type)n1,cmp_type));
                    free(s3);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("NUMBER") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto pres = std::max(p1, p2);
                    exe_precision.push(pres);
                    if(p1) {
                        s3[0] = s3[0]*(unsigned int)pow(10,pres-p1);
                        s3[1] = s3[1]*(unsigned int)pow(10,pres-p1);
                    };
                    if(p2) {
                        n1 = n1*(unsigned int)pow(10,pres-p2);
                    };
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1,cmp_type));
                    free(s3);
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,(float_type)n1,cmp_type));
                    free(s3);
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto pres = std::max(p1, p2);
                    exe_precision.push(pres);
                    if(p1) {
                        s3[0] = s3[0]*(unsigned int)pow(10,pres-p1);
                        s3[1] = s3[1]*(unsigned int)pow(10,pres-p1);
                    };
                    if(p2) {
                        n1 = n1*(unsigned int)pow(10,pres-p2);
                    };
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1,cmp_type));
                    free(s3);
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("FLOAT") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1_f,cmp_type));
                    free(s3);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("FLOAT") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,(int_type)n1_f,cmp_type));
                    free(s3);
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1_f,cmp_type));
                    free(s3);
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,(int_type)n1_f,cmp_type));
                    free(s3);
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("NAME") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR");
                    if (a->type[s2_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        bool_vectors.push(host_compare(s3,t,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    };
                    free(s3);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("NAME") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR");
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = a->decimal_zeroes[s2_val];
                    if (a->type[s2_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        auto pres = std::max(p1, p2);
                        exe_precision.push(pres);
                        if(p1) {
                            t[0] = t[0]*(unsigned int)pow(10,pres-p1);
                            t[1] = t[1]*(unsigned int)pow(10,pres-p1);
                        };
                        if(p2) {
                            s3[0] = s3[0]*(unsigned int)pow(10,pres-p2);
                            s3[1] = s3[1]*(unsigned int)pow(10,pres-p2);
                        };
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    };
                    free(s3);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("VECTOR F") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR");
                    if (a->type[s2_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        bool_vectors.push(host_compare(s3,t,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    };
                    free(s3);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("VECTOR") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR");
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto p1 = a->decimal_zeroes[s2_val];
                    if (a->type[s2_val] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        auto pres = std::max(p1, p2);
                        exe_precision.push(pres);
                        if(p1) {
                            t[0] = t[0]*(unsigned int)pow(10,pres-p1);
                            t[1] = t[1]*(unsigned int)pow(10,pres-p1);
                        };
                        if(p2) {
                            s3[0] = s3[0]*(unsigned int)pow(10,pres-p2);
                            s3[1] = s3[1]*(unsigned int)pow(10,pres-p2);
                        };
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    };
                    free(s3);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    int_type* s2 = exe_vectors.top();
                    exe_vectors.pop();
                    exe_type.push("VECTOR");
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto pres = std::max(p2, p1);
                    exe_precision.push(pres);
                    if(p1) {
                        s3[0] = s3[0]*(unsigned int)pow(10,pres-p1);
                        s3[1] = s3[1]*(unsigned int)pow(10,pres-p1);
                    };
                    if(p2) {
                        s2[0] = s2[0]*(unsigned int)pow(10,pres-p2);
                        s2[1] = s2[1]*(unsigned int)pow(10,pres-p2);
                    };
                    bool_vectors.push(host_compare(s2,s3,cmp_type));
                    free(s3);
                    free(s2);
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    float_type* s2 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s2,s3,cmp_type));
                    free(s3);
                    free(s2);
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    int_type* s2 = exe_vectors.top();
                    exe_vectors.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,s2,cmp_type));
                    free(s3);
                    free(s2);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    int_type* s2 = exe_vectors.top();
                    exe_vectors.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,s2,cmp_type));
                    free(s3);
                    free(s2);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
                    // column-vs-column comparison can't be decided from min/max alone
                    return 'R';
                }
            }
            else if (ss.compare("AND") == 0) {
                char s3 = bool_vectors.top();
                bool_vectors.pop();
                char s2 = bool_vectors.top();
                bool_vectors.pop();
                exe_type.push("VECTOR");
                bool_vectors.push(host_logical_and(s2,s3));
            }
            else if (ss.compare("OR") == 0) {
                char s3 = bool_vectors.top();
                bool_vectors.pop();
                char s2 = bool_vectors.top();
                bool_vectors.pop();
                exe_type.push("VECTOR");
                bool_vectors.push(host_logical_or(s2,s3));
            }
            else {
                if(ss.compare("JOIN") == 0)
                    process_error(2, "operation = is not valid");
                else
                    process_error(2, "operation " + string(ss)+ " is not valid");
                exit(0); // never gets here
            }
        };
    };
    return bool_vectors.top();
}
|
2482153a6758eebe09a58188fac1677c288712fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
///////////////////////////////////////////////////////////////////////
// Device-side vertical-stack concat: copies `numArrays` equal-length vectors
// into consecutive rows of the 2D output matrix `vz`.
// Preconditions (see original note): every input shape reports an element-wise
// stride (EWS), is a vector, and has the same length as inputShapes[0].
template<typename T>
__device__ void concatKernelVStack(int numArrays,
                                   Nd4jPointer *data, Nd4jPointer *inputShapeInfos,
                                   void *vz, Nd4jLong *zShapeInfo) {
    /*
     this is special case for concat: we group bunch of vectors into 2D matrix
     also: we expect each inputShapeInfo to have EWS, be a vector, and have equal size
    */
    auto z = static_cast<T *>(vz);
    auto inputShapes = (Nd4jLong **) inputShapeInfos;
    T **input = (T **) data;

    // Cached once per block from the FIRST input's shape; assumes all inputs match it.
    __shared__ int inputEWS;
    __shared__ int resultEWS;    // element-wise stride of the output
    __shared__ int inputLength;  // length of each input vector (== output row length)

    if (threadIdx.x == 0) {
        inputLength = shape::length(inputShapes[0]);
        inputEWS = shape::elementWiseStride(inputShapes[0]);
        resultEWS = shape::elementWiseStride(zShapeInfo);
    }
    __syncthreads();  // every thread must observe the cached shape values

    // One block per input row (grid-stride over rows); threads stride over elements.
    for (int r = blockIdx.x; r < numArrays; r += gridDim.x) {
        int zOffset = r * inputLength * resultEWS;  // start of output row r
        T *inputData = (T *) input[r];
        for (int i = threadIdx.x; i < inputLength; i += blockDim.x) {
            z[zOffset + i * resultEWS] = inputData[i * inputEWS];
        }
    }
}
///////////////////////////////////////////////////////////////////////
// Global entry point: thin wrapper so the device-side implementation can be
// reused; forwards all launch arguments unchanged.
template<typename T>
__global__ void execConcatKernelVStack(int numArrays,
                                       Nd4jPointer *data, Nd4jPointer *inputShapeInfos,
                                       void *vz, Nd4jLong *zShapeInfo) {
    concatKernelVStack<T>(numArrays, data, inputShapeInfos, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////////
// Host-side launcher: enqueues the VStack concat kernel on `stream` using
// launchDims (x = grid size, y = block size, z = dynamic shared-mem bytes),
// then checks the launch error code.
template<typename T>
__host__ void concatKernelVStackGeneric(dim3 &launchDims, hipStream_t *stream,
                                        int numArrays,
                                        Nd4jPointer *data, Nd4jPointer *inputShapeInfos,
                                        void *vz, Nd4jLong *zShapeInfo) {
    hipLaunchKernelGGL(( execConcatKernelVStack<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, numArrays, data, inputShapeInfos, vz, zShapeInfo);
    sd::DebugHelper::checkErrorCode(stream, "concatVStack(...) failed");
}

// Instantiate the launcher for every supported libnd4j element type.
BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL concatKernelVStackGeneric, (dim3 & launchDims, hipStream_t * stream, int numArrays, Nd4jPointer * data, Nd4jPointer * inputShapeInfos, void * vz, Nd4jLong *zShapeInfo), LIBND4J_TYPES);
} | 2482153a6758eebe09a58188fac1677c288712fc.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
///////////////////////////////////////////////////////////////////////
// Device-side vertical-stack concat: copies `numArrays` equal-length vectors
// into consecutive rows of the 2D output matrix `vz`.
// Preconditions (see original note): every input shape reports an element-wise
// stride (EWS), is a vector, and has the same length as inputShapes[0].
template<typename T>
__device__ void concatKernelVStack(int numArrays,
                                   Nd4jPointer *data, Nd4jPointer *inputShapeInfos,
                                   void *vz, Nd4jLong *zShapeInfo) {
    /*
     this is special case for concat: we group bunch of vectors into 2D matrix
     also: we expect each inputShapeInfo to have EWS, be a vector, and have equal size
    */
    auto z = static_cast<T *>(vz);
    auto inputShapes = (Nd4jLong **) inputShapeInfos;
    T **input = (T **) data;

    // Cached once per block from the FIRST input's shape; assumes all inputs match it.
    __shared__ int inputEWS;
    __shared__ int resultEWS;    // element-wise stride of the output
    __shared__ int inputLength;  // length of each input vector (== output row length)

    if (threadIdx.x == 0) {
        inputLength = shape::length(inputShapes[0]);
        inputEWS = shape::elementWiseStride(inputShapes[0]);
        resultEWS = shape::elementWiseStride(zShapeInfo);
    }
    __syncthreads();  // every thread must observe the cached shape values

    // One block per input row (grid-stride over rows); threads stride over elements.
    for (int r = blockIdx.x; r < numArrays; r += gridDim.x) {
        int zOffset = r * inputLength * resultEWS;  // start of output row r
        T *inputData = (T *) input[r];
        for (int i = threadIdx.x; i < inputLength; i += blockDim.x) {
            z[zOffset + i * resultEWS] = inputData[i * inputEWS];
        }
    }
}
///////////////////////////////////////////////////////////////////////
// Global entry point: thin wrapper so the device-side implementation can be
// reused; forwards all launch arguments unchanged.
template<typename T>
__global__ void execConcatKernelVStack(int numArrays,
                                       Nd4jPointer *data, Nd4jPointer *inputShapeInfos,
                                       void *vz, Nd4jLong *zShapeInfo) {
    concatKernelVStack<T>(numArrays, data, inputShapeInfos, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////////
// Host-side launcher: enqueues the VStack concat kernel on `stream` using
// launchDims (x = grid size, y = block size, z = dynamic shared-mem bytes),
// then checks the launch error code.
template<typename T>
__host__ void concatKernelVStackGeneric(dim3 &launchDims, cudaStream_t *stream,
                                        int numArrays,
                                        Nd4jPointer *data, Nd4jPointer *inputShapeInfos,
                                        void *vz, Nd4jLong *zShapeInfo) {
    execConcatKernelVStack<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(numArrays, data, inputShapeInfos, vz, zShapeInfo);
    sd::DebugHelper::checkErrorCode(stream, "concatVStack(...) failed");
}

// Instantiate the launcher for every supported libnd4j element type.
BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL concatKernelVStackGeneric, (dim3 & launchDims, cudaStream_t * stream, int numArrays, Nd4jPointer * data, Nd4jPointer * inputShapeInfos, void * vz, Nd4jLong *zShapeInfo), LIBND4J_TYPES);
} |
8a962f5fe6b99382f7f37850f59578b738e9936c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hipfft.h>
#include <stdlib.h>
#include <omp.h>
#include <dirent.h>
#include <math.h>
#include <string.h>
// the type of data read from files
// The type of data read from files: a raw int buffer plus its element count.
typedef struct {
    int *elements;  // heap-allocated values (filled by read_file)
    int size;       // number of valid entries in `elements`
} Array;
// GLOBALS
char* musicPath; // a string containing the path in which musics exist
char* samplePath; // a string containing the path in which samoles exist
int numberOfMusics;
int numberOfSamples;
char **musicPaths;
char **samplePaths;
// Globals for reduction
#define blockSize 1024
long double *d_array[1000];
int level = 0;
// this function reads a single file into Array *a
void read_file(char *path, Array *a);
// this function is used for testing
void test_init(Array *a);
// this function computes the fourier transform of Array *a
hipfftComplex *fft(Array *a);
// this function returns number of files in a certain path
int number_of_files(char *path);
// this function finds all file paths in char *path and puts them in char **res
void init_lists(char *path, char **res);
// char **path contains all file paths to be read
// this function reads all files with paths given in char **path
void read_files(char **paths, Array **dest, int size);
// this function returns power spectrum of Array *a
// power spectrum = sqrt(real^2 + imag^2)
long double *real_fft(Array *a);
// this kernel is used for computing the power spectrum
// out[i] = sqrt(in[i].real^2 + in[i].imag^2)
__global__ void real_kernel(hipfftComplex *in, long double *out, int size);
// this function is used after calling each cuda function
void check_errors(hipError_t status, char *line);
// serial comparison
long double compare(Array *sample, Array *music);
// serial cosine similarity
long double similarity(long double *sampleFft, long double *musicSliceFft, int size);
// this kernel does the element-by-element multiplication
__global__ void mul_kernel(long double *in1, long double *in2, long double *out, int size);
// parallel comparison
long double compare_parallel(Array *sample, Array *music);
// parallel cosine similarity
long double cosine_similarity(long double *sampleFft, long double *musicSliceFft, int size);
// this is the helper function to do reduction on a certain array
long double reduce(long double *arr, int size);
// this kernel does the reduction :)
__global__ void reduction_kernel(long double *g_idata, long double *g_odata, int size);
// serial reduction code
long double serial_reduce(long double *arr, int size);
// Entry point. argv[1] = directory of music .txt files, argv[2] = directory of
// sample .txt files. Pipeline: discover paths (2 OpenMP threads) -> allocate and
// load all files -> for each sample, compare against every music on the GPU and
// report the best match (similarity threshold 0.7, otherwise "Not Found").
int main(int argc, char* argv[])
{
    if (argc != 3) {
        printf("Invalid number of arguments\n");
        return 1;
    }
    // read the arguments
    musicPath = argv[1];
    samplePath = argv[2];
    // fill lists with paths
    omp_set_nested(1);
    // Thread 0 scans the music directory, thread 1 the sample directory.
#pragma omp parallel num_threads(2)
    {
        int id = omp_get_thread_num();
        if (id == 0) {
            numberOfMusics = number_of_files(musicPath);
            musicPaths = (char **)malloc(sizeof(char *) * numberOfMusics);
            init_lists(musicPath, musicPaths);
        }
        else {
            numberOfSamples = number_of_files(samplePath);
            samplePaths = (char **)malloc(sizeof(char *) * numberOfSamples);
            init_lists(samplePath, samplePaths);
        }
    }
    // alocate placeholders for musics and samples
    Array **musics = (Array **)malloc(numberOfMusics * sizeof(Array *));
    Array **samples = (Array **)malloc(numberOfSamples * sizeof(Array *));
    // allocation...
#pragma omp parallel for num_threads(4)
    for (int i = 0; i < numberOfMusics; i++) {
        musics[i] = (Array *)malloc(sizeof(*musics[i]));
    }
    // allocation...
#pragma omp parallel for num_threads(4)
    for (int i = 0; i < numberOfSamples; i++) {
        samples[i] = (Array *)malloc(sizeof(*samples[i]));
    }
    // Load music and sample files concurrently (one reader thread each).
#pragma omp parallel num_threads(2)
    {
        int id = omp_get_thread_num();
        if (id == 0) {
            read_files(musicPaths, musics, numberOfMusics);
        }
        else {
            read_files(samplePaths, samples, numberOfSamples);
        }
    }
    Array *music;
    Array *sample;
    long double current, best = -INFINITY;
    int bestIndex = 0;
    printf("\n------------------------ Musics Read -------------------------\n");
    for (int i = 0; i < numberOfMusics; i++) {
        printf("%s\n", musicPaths[i]);
    }
    printf("\n----------------------- Samples Read ------------------------\n");
    for (int i = 0; i < numberOfSamples; i++) {
        printf("%s\n", samplePaths[i]);
    }
    printf("\n-------------------------------------------------------------\n");
    // Match every sample against every music; keep the best similarity score.
    for (int j = 0; j < numberOfSamples; j++) {
        sample = samples[j];
        best = -INFINITY;
        for (int i = 0; i < numberOfMusics; i++) {
            music = musics[i];
            current = compare_parallel(sample, music);
            if (current > best) {
                best = current;
                bestIndex = i;
            }
        }
        if (best > 0.7)
            printf("%s >>> %s\n", samplePaths[j], musicPaths[bestIndex]);
        else
            printf("%s >>> Not Found\n", samplePaths[j]);
        printf("\n-------------------------------------------------------------\n");
    }
    /*
    Array *music = (Array *)malloc(sizeof(*music));
    Array *sample = (Array *)malloc(sizeof(*sample));
    long double current, best = -INFINITY;
    int bestIndex = 0;
    for (int i = 0; i < numberOfMusics; i++) {
    read_file(musicPaths[i], music);
    //musicFft = real_fft(music);
    for (int j = 0; j < numberOfSamples; j++) {
    read_file(samplePaths[j], sample);
    //sampleFft = real_fft(sample);
    // COMPARE
    //compare(sample, music);
    current = compare_parallel(sample, music);
    if (current > best) {
    best = current;
    bestIndex = j;
    }
    //printf("%s, %s: %f\n", musicPaths[i], samplePaths[j], current);
    //free(sampleFft);
    free(sample->elements);
    }
    if (best > 0.7)
    printf("%s >>> %s\n", musicPaths[i], samplePaths[bestIndex]);
    else
    printf("%s >>> Not Found\n", musicPaths[i]);
    //free(musicFft);
    free(music->elements);
    }
    */
    return 0;
}
// Reads whitespace-separated integers from `path` into `a`.
// Pass 1 counts the values; the stream is then rewound and pass 2 fills a
// freshly allocated buffer. On open failure the error is reported and `a` is
// left as an empty array (fix: the original dereferenced a NULL FILE*).
void read_file(char *path, Array *a) {
    printf("\n");
    printf("started reading %s\n", path);
    FILE *fp = fopen(path, "r");
    if (fp == NULL) {
        perror(path);
        a->elements = NULL;
        a->size = 0;
        return;
    }
    int count = 0;
    int num;
    // Pass 1: count how many integers the file holds.
    while (fscanf(fp, " %d", &num) == 1) {
        count++;
    }
    rewind(fp);  // cheaper and safer than the original fclose + fopen
    a->elements = (int *)malloc(count * sizeof(int));
    a->size = count;
    // Pass 2: fill the buffer.
    int i = 0;
    while (fscanf(fp, " %d", &num) == 1) {
        a->elements[i] = num;
        i++;
    }
    printf("finished reading %s\n", path);
    fclose(fp);
}
// Fills `a` with a fixed 0..99 ramp; used only for manual testing.
void test_init(Array *a) {
    const int n = 100;
    a->size = n;
    a->elements = (int *)malloc(n * sizeof(int));
    for (int k = 0; k < n; ++k)
        a->elements[k] = k;
}
// Computes the complex FFT of `a` on the GPU via hipFFT.
// Returns a heap-allocated hipfftComplex array of a->size bins that the caller
// must free(). Real parts come from a->elements; imaginary parts are zeroed.
// NOTE(review): none of the HIP/hipFFT calls here are error-checked — a
// failure silently yields garbage output.
hipfftComplex *fft(Array *a) {
    // Select the GPU
    hipSetDevice(0);
    hipfftHandle plan;
    hipfftComplex *d_data, *h_data;
    // h_data is used to convert int* in Array object to a hipfftComplex type
    h_data = (hipfftComplex *)malloc(a->size * sizeof(hipfftComplex));
    // d_data is the copy of h_data in the GPU
    hipMalloc((void **)&d_data, a->size * sizeof(hipfftComplex));
    // Convert Array object to hipfftComplex type
    for (int i = 0; i < a->size; i++) {
        h_data[i].x = a->elements[i];
        h_data[i].y = 0;
    }
    // copy h_data to GPU
    hipMemcpy(d_data, h_data, a->size * sizeof(hipfftComplex), hipMemcpyHostToDevice);
    // Compute Fourier Transform in GPU (in-place, forward direction)
    hipfftPlan1d(&plan, a->size, HIPFFT_C2C, 1);
    hipfftExecC2C(plan, d_data, d_data, HIPFFT_FORWARD);
    // Wait for GPU operation to completes
    hipDeviceSynchronize();
    // Now copy the result back to host (reuses h_data as the output buffer)
    hipMemcpy(h_data, d_data, a->size * sizeof(hipfftComplex), hipMemcpyDeviceToHost);
    hipfftDestroy(plan);
    hipFree(d_data);
    return h_data;
}
// Power spectrum of `a`: GPU FFT followed by per-bin magnitude
// sqrt(re^2 + im^2) computed by real_kernel.
// Returns a heap-allocated long double array of a->size entries; the caller
// owns it (compare/compare_parallel free it).
long double *real_fft(Array *a) {
    hipError_t status;
    hipfftComplex *fourier = fft(a);
    /*for (int i = 0; i < a->size; a++) {
    printf("%f %f\n", fourier[i].x, fourier[i].y);
    }
    printf("\n");
    */
    long double *out = (long double *)malloc(sizeof(long double) * a->size);
    long double *d_out;
    hipfftComplex *d_fourier;
    status = hipMalloc((void **)&d_out, a->size * sizeof(long double));
    check_errors(status, "hipMalloc(d_out)");
    status = hipMalloc((void **)&d_fourier, a->size * sizeof(hipfftComplex));
    check_errors(status, "hipMalloc(d_fourier)");
    status = hipMemcpy(d_fourier, fourier, a->size * sizeof(hipfftComplex), hipMemcpyHostToDevice);
    check_errors(status, "hipMemcpy(d_fourier, fourier)");
    // One thread per bin; cap blocks at 1024 threads each.
    int n_blocks = ceil((long double)a->size / 1024.0);
    int n_threads = (n_blocks > 1) ? 1024 : a->size;
    real_kernel << <n_blocks, n_threads >> > (d_fourier, d_out, a->size);
    check_errors(hipGetLastError(), "kernel real_fft");
    status = hipDeviceSynchronize();
    check_errors(status, "cudaDeviceSync");
    status = hipMemcpy(out, d_out, a->size * sizeof(long double), hipMemcpyDeviceToHost);
    check_errors(status, "hipMemcpy(out, d_out)");
    status = hipFree(d_out);
    check_errors(status, "hipFree(d_out)");
    status = hipFree(d_fourier);
    check_errors(status, "hipFree(d_fourier)");
    free(fourier);
    return out;
}
// Per-bin magnitude: out[i] = sqrt(in[i].x^2 + in[i].y^2) for i < size.
// One thread per element; excess threads exit early.
__global__ void real_kernel(hipfftComplex *in, long double *out, int size) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= size)
        return;
    const hipfftComplex c = in[idx];  // single coalesced load of both parts
    out[idx] = sqrt(c.x * c.x + c.y * c.y);
}
// Prints a diagnostic when a HIP call failed; `line` labels the call site.
// Fix: diagnostics now go to stderr instead of stdout, so they are not lost
// among (or buffered behind) the program's progress output.
void check_errors(hipError_t status, char *line) {
    if (status != hipSuccess)
        fprintf(stderr, "\n%s - %s", hipGetErrorString(status), line);
}
// Serial reference comparison: slides a sample-sized window over `music`,
// zero-padding the final partial slice, and evaluates the spectral similarity
// of each slice against the sample.
// NOTE: the per-slice similarity value is currently discarded and the function
// always returns 0.0 — kept only as a serial baseline for compare_parallel.
// Fix: the original freed the Array struct (`free(slice)`) inside the loop,
// leaking the zero-padded element buffer; ownership is now released correctly.
long double compare(Array *sample, Array *music) {
    int iters = ceil((long double)music->size / (long double)sample->size);
    Array *slice = (Array *)malloc(sizeof(*slice));
    long double *sampleFft = real_fft(sample);
    long double *musicFft;
    int sliceSize = sample->size;
    int residual = iters * sliceSize - music->size;  // zero-fill needed in last slice
    int start;
    int residualStart;
    int flag = 0;
    slice->size = sliceSize;
    for (int k = 0; k < iters; k++) {
        start = k * sliceSize;
        slice->elements = &music->elements[start];  // borrow a window, no copy
        if (k == iters - 1) {
            // Last slice may run past the end of the music: copy and zero-pad.
            slice->elements = (int *)malloc(sliceSize * sizeof(int));
            flag = 1;
            residualStart = sliceSize - residual;
            for (int l = 0; l < sliceSize; l++)
                slice->elements[l] = (l >= residualStart) ? 0 : music->elements[l + (iters - 1) * sliceSize];
        }
        musicFft = real_fft(slice);
        similarity(sampleFft, musicFft, sliceSize);
        if (flag)
            free(slice->elements);  // fix: was free(slice), leaking this buffer
        free(musicFft);
    }
    free(slice);  // fix: the struct itself was never released
    free(sampleFft);
    return 0.0;
}
// Parallel comparison: slides a sample-sized window over `music` (the last
// slice is copied and zero-padded in parallel with OpenMP) and returns the
// MAXIMUM GPU cosine similarity across all slices.
// Fix: the original freed the Array struct (`free(slice)`) inside the loop,
// leaking the zero-padded element buffer; ownership is now released correctly.
long double compare_parallel(Array *sample, Array *music) {
    int iters = ceil((long double)music->size / (long double)sample->size);
    Array *slice = (Array *)malloc(sizeof(*slice));
    long double *sampleFft = real_fft(sample);
    long double *musicFft;
    int sliceSize = sample->size;
    int residual = iters * sliceSize - music->size;  // zero-fill needed in last slice
    int start;
    int residualStart;
    int flag = 0;
    long double sim = 0.0;
    long double max = -INFINITY;
    slice->size = sliceSize;
    for (int k = 0; k < iters; k++) {
        flag = 0;
        start = k * sliceSize;
        slice->elements = &music->elements[start];  // borrow a window, no copy
        if (k == iters - 1) {
            // Last slice may run past the end of the music: copy and zero-pad.
            slice->elements = (int *)malloc(sliceSize * sizeof(int));
            flag = 1;
            residualStart = sliceSize - residual;
#pragma omp parallel for num_threads(4)
            for (int l = 0; l < sliceSize; l++) {
                slice->elements[l] = (l >= residualStart) ? 0 : music->elements[l + (iters - 1) * sliceSize];
            }
        }
        musicFft = real_fft(slice);
        sim = cosine_similarity(sampleFft, musicFft, sliceSize);
        if (sim > max) {
            max = sim;
        }
        if (flag)
            free(slice->elements);  // fix: was free(slice), leaking this buffer
        free(musicFft);
    }
    free(slice);  // fix: the struct itself was never released
    free(sampleFft);
    return max;
}
// GPU cosine similarity of two equal-length power spectra:
//   dot(a,b) / (|a| * |b|)
// The three element-wise products (a*b, a*a, b*b) are computed by mul_kernel
// on the device; their sums are folded by reduce().
// NOTE: reduce() takes ownership of (and frees) dot/norm1/norm2 — do not free
// them here.
long double cosine_similarity(long double *sampleFft, long double *musicSliceFft, int size) {
    long double *d_in1, *d_in2, *d_out;
    long double *dot, *norm1, *norm2;
    dot = (long double *)malloc(size * sizeof(long double));
    norm1 = (long double *)malloc(size * sizeof(long double));
    norm2 = (long double *)malloc(size * sizeof(long double));
    hipError_t status;
    hipSetDevice(0);
    // Device buffers: the two inputs plus a shared output scratch buffer.
    status = hipMalloc((void **)&d_in1, size * sizeof(long double));
    check_errors(status, "hipMalloc(in1)");
    status = hipMalloc((void **)&d_in2, size * sizeof(long double));
    check_errors(status, "hipMalloc(in2)");
    status = hipMalloc((void **)&d_out, size * sizeof(long double));
    check_errors(status, "hipMalloc(out)");
    status = hipMemcpy(d_in1, sampleFft, size * sizeof(long double), hipMemcpyHostToDevice);
    check_errors(status, "hipMemcpy(in1)");
    status = hipMemcpy(d_in2, musicSliceFft, size * sizeof(long double), hipMemcpyHostToDevice);
    check_errors(status, "hipMemcpy(in2)");
    // One thread per element; cap blocks at 1024 threads each.
    int n_blocks = ceil((long double)size / 1024.0);
    int n_threads = (n_blocks > 1) ? 1024 : size;
    // dot[i] = a[i] * b[i]
    mul_kernel << <n_blocks, n_threads >> > (d_in1, d_in2, d_out, size);
    check_errors(hipGetLastError(), "kernel(in1, in2)");
    status = hipDeviceSynchronize();
    check_errors(status, "cudaDeviceSync");
    status = hipMemcpy(dot, d_out, size * sizeof(long double), hipMemcpyDeviceToHost);
    check_errors(status, "hipMemcpy(dot, d_out)");
    // norm1[i] = a[i]^2
    mul_kernel << <n_blocks, n_threads >> > (d_in1, d_in1, d_out, size);
    check_errors(hipGetLastError(), "kernel(in1, in1)");
    status = hipDeviceSynchronize();
    check_errors(status, "cudaDeviceSync");
    status = hipMemcpy(norm1, d_out, size * sizeof(long double), hipMemcpyDeviceToHost);
    check_errors(status, "hipMemcpy(norm1, d_out)");
    // norm2[i] = b[i]^2
    mul_kernel << <n_blocks, n_threads >> > (d_in2, d_in2, d_out, size);
    check_errors(hipGetLastError(), "kernel(in2, in2)");
    status = hipDeviceSynchronize();
    check_errors(status, "cudaDeviceSync");
    status = hipMemcpy(norm2, d_out, size * sizeof(long double), hipMemcpyDeviceToHost);
    check_errors(status, "hipMemcpy(norm2, d_out)");
    // reduce() frees its argument, so dot/norm1/norm2 are consumed here.
    long double f_dot = reduce(dot, size);
    long double f_norm1 = sqrt(reduce(norm1, size));
    long double f_norm2 = sqrt(reduce(norm2, size));
    status = hipFree(d_in1);
    check_errors(status, "hipFree(in1)");
    status = hipFree(d_in2);
    check_errors(status, "hipFree(in2)");
    status = hipFree(d_out);
    check_errors(status, "hipFree(out)");
    long double res = (f_dot) / (f_norm1 * f_norm2);
    return res;
}
// Element-wise product: out[i] = in1[i] * in2[i] for i < size.
// One thread per element; excess threads exit early.
__global__ void mul_kernel(long double *in1, long double *in2, long double *out, int size) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= size)
        return;
    out[idx] = in1[idx] * in2[idx];
}
// GPU tree reduction: sums the `size` host values in `arr` and returns the
// total. CONSUMES `arr`: it is realloc'd up to the next power of two (padding
// zeroed) and freed before returning. Uses the global d_array[] ladder, one
// device buffer per reduction level; `level` is reset to 0 on exit.
// Fixes: the cleanup loop mixed up its control variables
// (`for (int j = 0; i < dcount; i++)` — it only worked because `i` happened to
// be 0), and the result was narrowed through a `float` before returning.
long double reduce(long double *arr, int size) {
    // Pad to the next power of two so every kernel level is full.
    int newsize = pow(2, ceil(log(size) / log(2)));
    arr = (long double *)realloc(arr, newsize * sizeof(long double));
#pragma omp parallel for num_threads(6)
    for (int i = size; i < newsize; i++)
        arr[i] = 0;
    hipError_t status;
    status = hipSetDevice(0);
    check_errors(status, "Dev Set");
    // Allocate one device buffer per level of the reduction tree.
    int i = newsize;
    int dcount = 0;
    while (i != 0) {
        status = hipMalloc((void **)&d_array[level], i * sizeof(long double));
        dcount++;
        check_errors(status, "hipMalloc(d_array[level])");
        if (i == 1) {
            i = 0;
        }
        else {
            i = ((i - 1) / blockSize) + 1;  // values remaining after one pass
        }
        level++;
    }
    status = hipMemcpy(d_array[0], arr, newsize * sizeof(long double), hipMemcpyHostToDevice);
    check_errors(status, "Memcpy(d_array[0], arr)");
    // Each pass folds `current` values down to `next` per-block partial sums.
    int current = newsize;
    int next = ((current - 1) / blockSize) + 1;
    int counter = 0;
    while (current != 1) {
        // blockSize/2 threads per block: each thread folds two elements.
        reduction_kernel << <next, blockSize / 2 >> > (d_array[counter], d_array[counter + 1], current);
        check_errors(hipGetLastError(), "kernel");
        current = next;
        next = ((current - 1) / blockSize) + 1;
        counter++;
    }
    status = hipDeviceSynchronize();
    check_errors(status, "Dev Sync");
    // The deepest level holds the single final sum.
    hipMemcpy(arr, d_array[level - 1], sizeof(long double), hipMemcpyDeviceToHost);
    for (int j = 0; j < dcount; j++) {  // fix: loop on j, not the stale i
        status = hipFree(d_array[j]);
        check_errors(status, "hipFree");
    }
    level = 0;
    long double res = arr[0];  // fix: was narrowed through a float
    free(arr);
    return res;
}
// CPU reference reduction: left-to-right sum of the first `size` elements.
long double serial_reduce(long double *arr, int size) {
    long double total = 0.0;
    const long double *p = arr;
    const long double *end = arr + size;
    while (p < end) {
        total += *p++;  // same left-to-right order as the original loop
    }
    return total;
}
// Block-level tree reduction over g_idata[0..size). Launched with
// blockDim.x == blockSize/2: each thread first folds TWO global elements into
// shared memory, halving the tree depth. After the shared-memory loop, the
// final 64 values are folded warp-synchronously (tid < 32) with __syncwarp()
// between steps. Writes one partial sum per block to g_odata[blockIdx.x].
// NOTE(review): the warp tail reads sdata[tid+32..tid+1] unconditionally, so
// it relies on blockDim.x >= 64 — true for the blockSize/2 = 512 launches in
// reduce(); confirm before reusing with smaller blocks.
__global__ void reduction_kernel(long double *g_idata, long double *g_odata, int size)
{
    __shared__ long double sdata[blockSize];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
    // Guarded two-element load: out-of-range lanes contribute 0.
    if (i >= size)
        sdata[tid] = 0;
    else if (i + blockDim.x >= size)
        sdata[tid] = g_idata[i];
    else
        sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
    __syncthreads();
    // do reduction in shared mem
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
    {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    // Final warp: lanes stay in lockstep via __syncwarp(), no block barrier needed.
    if (tid < 32)
    {
        sdata[tid] += sdata[tid + 32];
        __syncwarp();
        sdata[tid] += sdata[tid + 16];
        __syncwarp();
        sdata[tid] += sdata[tid + 8];
        __syncwarp();
        sdata[tid] += sdata[tid + 4];
        __syncwarp();
        sdata[tid] += sdata[tid + 2];
        __syncwarp();
        sdata[tid] += sdata[tid + 1];
        __syncwarp();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// CPU cosine similarity of two equal-length spectra: dot(a,b) / (|a| * |b|).
long double similarity(long double *sampleFft, long double *musicSliceFft, int size) {
    long double dotProd = 0.0;
    long double normA = 0.0;
    long double normB = 0.0;
    // Single pass accumulates the dot product and both squared norms.
    for (int idx = 0; idx < size; idx++) {
        const long double a = sampleFft[idx];
        const long double b = musicSliceFft[idx];
        dotProd += a * b;
        normA += a * a;
        normB += b * b;
    }
    return dotProd / (sqrt(normA) * sqrt(normB));
}
// Counts the entries in directory `path` whose names end in ".txt".
// Returns 0 (after reporting the error) when the directory cannot be opened.
int number_of_files(char *path) {
    DIR *dirHandle = opendir(path);
    if (dirHandle == NULL) {
        /* could not open directory */
        perror("");
        return 0;
    }
    int txtCount = 0;
    struct dirent *entry;
    while ((entry = readdir(dirHandle)) != NULL) {
        char *ext = strrchr(entry->d_name, '.');  // last '.' = extension start
        if (ext != NULL && strcmp(ext, ".txt") == 0)
            txtCount++;
    }
    closedir(dirHandle);
    return txtCount;
}
// Scans directory `path` for *.txt entries and stores a heap-allocated copy of
// each full path into res[]; `res` must already be sized via
// number_of_files(path) so both traversals see the same entries.
// NOTE(review): joins with '\\', i.e. Windows-style separators — confirm the
// target platform before changing.
// Fix: the per-path allocation was strlen() bytes — one short of the NUL
// terminator — so strcpy wrote past the end of the buffer.
void init_lists(char *path, char **res) {
    int i = 0;
    char currentPath[500];
    DIR *dir;
    struct dirent *ent;
    char *dot;
    if ((dir = opendir(path)) != NULL) {
        /* print all the files and directories within directory */
        while ((ent = readdir(dir)) != NULL) {
            dot = strrchr(ent->d_name, '.');
            if (dot && !strcmp(dot, ".txt")) {
                strcpy(currentPath, path);
                strcat(currentPath, "\\");
                strcat(currentPath, ent->d_name);
                res[i] = (char *)malloc((strlen(currentPath) + 1) * sizeof(char));  // +1 for NUL
                strcpy(res[i], currentPath);
                i++;
            }
        }
        closedir(dir);
    }
    else {
        /* could not open directory */
        perror("");
    }
}
// Loads every file listed in paths[0..size-1] into dest[] using 4 OpenMP
// threads; each iteration is independent, so no synchronization is needed.
void read_files(char **paths, Array **dest, int size) {
#pragma omp parallel for num_threads(4)
    for (int i = 0; i < size; i++) {
        read_file(paths[i], dest[i]);
    }
}
| 8a962f5fe6b99382f7f37850f59578b738e9936c.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cufft.h>
#include <stdlib.h>
#include <omp.h>
#include <dirent.h>
#include <math.h>
#include <string.h>
// the type of data read from files
// The type of data read from files: a raw int buffer plus its element count.
typedef struct {
    int *elements;  // heap-allocated values (filled by read_file)
    int size;       // number of valid entries in `elements`
} Array;
// GLOBALS
char* musicPath; // a string containing the path in which musics exist
char* samplePath; // a string containing the path in which samoles exist
int numberOfMusics;
int numberOfSamples;
char **musicPaths;
char **samplePaths;
// Globals for reduction
#define blockSize 1024
long double *d_array[1000];
int level = 0;
// this function reads a single file into Array *a
void read_file(char *path, Array *a);
// this function is used for testing
void test_init(Array *a);
// this function computes the fourier transform of Array *a
cufftComplex *fft(Array *a);
// this function returns number of files in a certain path
int number_of_files(char *path);
// this function finds all file paths in char *path and puts them in char **res
void init_lists(char *path, char **res);
// char **path contains all file paths to be read
// this function reads all files with paths given in char **path
void read_files(char **paths, Array **dest, int size);
// this function returns power spectrum of Array *a
// power spectrum = sqrt(real^2 + imag^2)
long double *real_fft(Array *a);
// this kernel is used for computing the power spectrum
// out[i] = sqrt(in[i].real^2 + in[i].imag^2)
__global__ void real_kernel(cufftComplex *in, long double *out, int size);
// this function is used after calling each cuda function
void check_errors(cudaError_t status, char *line);
// serial comparison
long double compare(Array *sample, Array *music);
// serial cosine similarity
long double similarity(long double *sampleFft, long double *musicSliceFft, int size);
// this kernel does the element-by-element multiplication
__global__ void mul_kernel(long double *in1, long double *in2, long double *out, int size);
// parallel comparison
long double compare_parallel(Array *sample, Array *music);
// parallel cosine similarity
long double cosine_similarity(long double *sampleFft, long double *musicSliceFft, int size);
// this is the helper function to do reduction on a certain array
long double reduce(long double *arr, int size);
// this kernel does the reduction :)
__global__ void reduction_kernel(long double *g_idata, long double *g_odata, int size);
// serial reduction code
long double serial_reduce(long double *arr, int size);
// Entry point. argv[1] = directory of music .txt files, argv[2] = directory of
// sample .txt files. Pipeline: discover paths (2 OpenMP threads) -> allocate and
// load all files -> for each sample, compare against every music on the GPU and
// report the best match (similarity threshold 0.7, otherwise "Not Found").
int main(int argc, char* argv[])
{
    if (argc != 3) {
        printf("Invalid number of arguments\n");
        return 1;
    }
    // read the arguments
    musicPath = argv[1];
    samplePath = argv[2];
    // fill lists with paths
    omp_set_nested(1);
    // Thread 0 scans the music directory, thread 1 the sample directory.
#pragma omp parallel num_threads(2)
    {
        int id = omp_get_thread_num();
        if (id == 0) {
            numberOfMusics = number_of_files(musicPath);
            musicPaths = (char **)malloc(sizeof(char *) * numberOfMusics);
            init_lists(musicPath, musicPaths);
        }
        else {
            numberOfSamples = number_of_files(samplePath);
            samplePaths = (char **)malloc(sizeof(char *) * numberOfSamples);
            init_lists(samplePath, samplePaths);
        }
    }
    // alocate placeholders for musics and samples
    Array **musics = (Array **)malloc(numberOfMusics * sizeof(Array *));
    Array **samples = (Array **)malloc(numberOfSamples * sizeof(Array *));
    // allocation...
#pragma omp parallel for num_threads(4)
    for (int i = 0; i < numberOfMusics; i++) {
        musics[i] = (Array *)malloc(sizeof(*musics[i]));
    }
    // allocation...
#pragma omp parallel for num_threads(4)
    for (int i = 0; i < numberOfSamples; i++) {
        samples[i] = (Array *)malloc(sizeof(*samples[i]));
    }
    // Load music and sample files concurrently (one reader thread each).
#pragma omp parallel num_threads(2)
    {
        int id = omp_get_thread_num();
        if (id == 0) {
            read_files(musicPaths, musics, numberOfMusics);
        }
        else {
            read_files(samplePaths, samples, numberOfSamples);
        }
    }
    Array *music;
    Array *sample;
    long double current, best = -INFINITY;
    int bestIndex = 0;
    printf("\n------------------------ Musics Read -------------------------\n");
    for (int i = 0; i < numberOfMusics; i++) {
        printf("%s\n", musicPaths[i]);
    }
    printf("\n----------------------- Samples Read ------------------------\n");
    for (int i = 0; i < numberOfSamples; i++) {
        printf("%s\n", samplePaths[i]);
    }
    printf("\n-------------------------------------------------------------\n");
    // Match every sample against every music; keep the best similarity score.
    for (int j = 0; j < numberOfSamples; j++) {
        sample = samples[j];
        best = -INFINITY;
        for (int i = 0; i < numberOfMusics; i++) {
            music = musics[i];
            current = compare_parallel(sample, music);
            if (current > best) {
                best = current;
                bestIndex = i;
            }
        }
        if (best > 0.7)
            printf("%s >>> %s\n", samplePaths[j], musicPaths[bestIndex]);
        else
            printf("%s >>> Not Found\n", samplePaths[j]);
        printf("\n-------------------------------------------------------------\n");
    }
    /*
    Array *music = (Array *)malloc(sizeof(*music));
    Array *sample = (Array *)malloc(sizeof(*sample));
    long double current, best = -INFINITY;
    int bestIndex = 0;
    for (int i = 0; i < numberOfMusics; i++) {
    read_file(musicPaths[i], music);
    //musicFft = real_fft(music);
    for (int j = 0; j < numberOfSamples; j++) {
    read_file(samplePaths[j], sample);
    //sampleFft = real_fft(sample);
    // COMPARE
    //compare(sample, music);
    current = compare_parallel(sample, music);
    if (current > best) {
    best = current;
    bestIndex = j;
    }
    //printf("%s, %s: %f\n", musicPaths[i], samplePaths[j], current);
    //free(sampleFft);
    free(sample->elements);
    }
    if (best > 0.7)
    printf("%s >>> %s\n", musicPaths[i], samplePaths[bestIndex]);
    else
    printf("%s >>> Not Found\n", musicPaths[i]);
    //free(musicFft);
    free(music->elements);
    }
    */
    return 0;
}
// Reads whitespace-separated integers from `path` into `a`.
// Pass 1 counts the values; the stream is then rewound and pass 2 fills a
// freshly allocated buffer. On open failure the error is reported and `a` is
// left as an empty array (fix: the original dereferenced a NULL FILE*).
void read_file(char *path, Array *a) {
    printf("\n");
    printf("started reading %s\n", path);
    FILE *fp = fopen(path, "r");
    if (fp == NULL) {
        perror(path);
        a->elements = NULL;
        a->size = 0;
        return;
    }
    int count = 0;
    int num;
    // Pass 1: count how many integers the file holds.
    while (fscanf(fp, " %d", &num) == 1) {
        count++;
    }
    rewind(fp);  // cheaper and safer than the original fclose + fopen
    a->elements = (int *)malloc(count * sizeof(int));
    a->size = count;
    // Pass 2: fill the buffer.
    int i = 0;
    while (fscanf(fp, " %d", &num) == 1) {
        a->elements[i] = num;
        i++;
    }
    printf("finished reading %s\n", path);
    fclose(fp);
}
// Fills `a` with a fixed 0..99 ramp; used only for manual testing.
void test_init(Array *a) {
    const int n = 100;
    a->size = n;
    a->elements = (int *)malloc(n * sizeof(int));
    for (int k = 0; k < n; ++k)
        a->elements[k] = k;
}
// Computes the complex FFT of `a` on the GPU via cuFFT.
// Returns a heap-allocated cufftComplex array of a->size bins that the caller
// must free(). Real parts come from a->elements; imaginary parts are zeroed.
// NOTE(review): none of the CUDA/cuFFT calls here are error-checked — a
// failure silently yields garbage output.
cufftComplex *fft(Array *a) {
    // Select the GPU
    cudaSetDevice(0);
    cufftHandle plan;
    cufftComplex *d_data, *h_data;
    // h_data is used to convert int* in Array object to a cufftComplex type
    h_data = (cufftComplex *)malloc(a->size * sizeof(cufftComplex));
    // d_data is the copy of h_data in the GPU
    cudaMalloc((void **)&d_data, a->size * sizeof(cufftComplex));
    // Convert Array object to cufftComplex type
    for (int i = 0; i < a->size; i++) {
        h_data[i].x = a->elements[i];
        h_data[i].y = 0;
    }
    // copy h_data to GPU
    cudaMemcpy(d_data, h_data, a->size * sizeof(cufftComplex), cudaMemcpyHostToDevice);
    // Compute Fourier Transform in GPU (in-place, forward direction)
    cufftPlan1d(&plan, a->size, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);
    // Wait for GPU operation to completes
    cudaDeviceSynchronize();
    // Now copy the result back to host (reuses h_data as the output buffer)
    cudaMemcpy(h_data, d_data, a->size * sizeof(cufftComplex), cudaMemcpyDeviceToHost);
    cufftDestroy(plan);
    cudaFree(d_data);
    return h_data;
}
// Power spectrum of `a`: GPU FFT followed by per-bin magnitude
// sqrt(re^2 + im^2) computed by real_kernel.
// Returns a heap-allocated long double array of a->size entries; the caller
// owns it (compare/compare_parallel free it).
long double *real_fft(Array *a) {
    cudaError_t status;
    cufftComplex *fourier = fft(a);
    /*for (int i = 0; i < a->size; a++) {
    printf("%f %f\n", fourier[i].x, fourier[i].y);
    }
    printf("\n");
    */
    long double *out = (long double *)malloc(sizeof(long double) * a->size);
    long double *d_out;
    cufftComplex *d_fourier;
    status = cudaMalloc((void **)&d_out, a->size * sizeof(long double));
    check_errors(status, "cudaMalloc(d_out)");
    status = cudaMalloc((void **)&d_fourier, a->size * sizeof(cufftComplex));
    check_errors(status, "cudaMalloc(d_fourier)");
    status = cudaMemcpy(d_fourier, fourier, a->size * sizeof(cufftComplex), cudaMemcpyHostToDevice);
    check_errors(status, "cudaMemcpy(d_fourier, fourier)");
    // One thread per bin; cap blocks at 1024 threads each.
    int n_blocks = ceil((long double)a->size / 1024.0);
    int n_threads = (n_blocks > 1) ? 1024 : a->size;
    real_kernel << <n_blocks, n_threads >> > (d_fourier, d_out, a->size);
    check_errors(cudaGetLastError(), "kernel real_fft");
    status = cudaDeviceSynchronize();
    check_errors(status, "cudaDeviceSync");
    status = cudaMemcpy(out, d_out, a->size * sizeof(long double), cudaMemcpyDeviceToHost);
    check_errors(status, "cudaMemcpy(out, d_out)");
    status = cudaFree(d_out);
    check_errors(status, "cudaFree(d_out)");
    status = cudaFree(d_fourier);
    check_errors(status, "cudaFree(d_fourier)");
    free(fourier);
    return out;
}
// Per-bin magnitude: out[i] = sqrt(in[i].x^2 + in[i].y^2) for i < size.
// One thread per element; excess threads exit early.
__global__ void real_kernel(cufftComplex *in, long double *out, int size) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= size)
        return;
    const cufftComplex c = in[idx];  // single coalesced load of both parts
    out[idx] = sqrt(c.x * c.x + c.y * c.y);
}
/* Prints the CUDA error string plus a caller-supplied label on failure;
 * silent on cudaSuccess. */
void check_errors(cudaError_t status, char *line) {
    if (status == cudaSuccess)
        return;
    printf("\n%s - %s", cudaGetErrorString(status), line);
}
/*
 * Slides a sample-sized window over the music signal and compares the
 * sample's magnitude spectrum against each slice (serial variant).
 * The final slice is zero-padded when music->size is not an exact
 * multiple of sample->size. Always returns 0.0; each per-slice
 * similarity() result is currently discarded (compare_parallel is the
 * max-tracking variant).
 */
long double compare(Array *sample, Array *music) {
    int iters = ceil((long double)music->size / (long double)sample->size);
    Array *slice = (Array *)malloc(sizeof(*slice));
    long double *sampleFft = real_fft(sample);
    long double *musicFft;
    int sliceSize = sample->size;
    int residual = iters * sliceSize - music->size;   /* zeros needed in last slice */
    int start;
    int residualStart;
    int flag = 0;   /* set when slice->elements owns heap memory */
    slice->size = sliceSize;
    for (int k = 0; k < iters; k++) {
        start = k * sliceSize;
        slice->elements = &music->elements[start];    /* borrow, no copy */
        if (k == iters - 1) {
            /* Last slice may run past the end of music: copy and zero-pad. */
            slice->elements = (int *)malloc(sliceSize * sizeof(int));
            flag = 1;
            residualStart = sliceSize - residual;
            for (int l = 0; l < sliceSize; l++)
                slice->elements[l] = (l >= residualStart) ? 0 : music->elements[l + (iters - 1)*sliceSize];
        }
        musicFft = real_fft(slice);
        similarity(sampleFft, musicFft, sliceSize);
        /* BUGFIX: free the padded element buffer; the old code freed the
           Array struct itself (`free(slice)`), leaking the elements. */
        if (flag)
            free(slice->elements);
        free(musicFft);
    }
    free(slice);       /* BUGFIX: the Array wrapper was leaked before */
    free(sampleFft);
    return 0.0;
}
/*
 * Slides a sample-sized window over the music signal and returns the
 * maximum cosine similarity between the sample's magnitude spectrum and
 * any slice's spectrum. The final slice is zero-padded (in parallel via
 * OpenMP) when music->size is not an exact multiple of sample->size.
 */
long double compare_parallel(Array *sample, Array *music) {
    int iters = ceil((long double)music->size / (long double)sample->size);
    Array *slice = (Array *)malloc(sizeof(*slice));
    long double *sampleFft = real_fft(sample);
    long double *musicFft;
    int sliceSize = sample->size;
    int residual = iters * sliceSize - music->size;   /* zeros needed in last slice */
    int start;
    int residualStart;
    int flag = 0;   /* set when slice->elements owns heap memory */
    long double sim = 0.0;
    long double max = -INFINITY;
    slice->size = sliceSize;
    for (int k = 0; k < iters; k++) {
        flag = 0;
        start = k * sliceSize;
        slice->elements = &music->elements[start];    /* borrow, no copy */
        if (k == iters - 1) {
            /* Last slice may run past the end of music: copy and zero-pad. */
            slice->elements = (int *)malloc(sliceSize * sizeof(int));
            flag = 1;
            residualStart = sliceSize - residual;
            #pragma omp parallel for num_threads(4)
            for (int l = 0; l < sliceSize; l++) {
                slice->elements[l] = (l >= residualStart) ? 0 : music->elements[l + (iters - 1)*sliceSize];
            }
        }
        musicFft = real_fft(slice);
        sim = cosine_similarity(sampleFft, musicFft, sliceSize);
        if (sim > max) {
            max = sim;
        }
        /* BUGFIX: free the padded element buffer; the old code freed the
           Array struct itself (`free(slice)`), leaking the elements. */
        if (flag)
            free(slice->elements);
        free(musicFft);
    }
    free(slice);       /* BUGFIX: the Array wrapper was leaked before */
    free(sampleFft);
    return max;
}
/*
 * Cosine similarity of two spectra computed with GPU help:
 *   dot(a,b) / (||a|| * ||b||).
 * Three elementwise products (a*b, a*a, b*b) are produced by mul_kernel
 * into the shared d_out buffer, copied back, then summed by reduce().
 * reduce() takes ownership of the host buffer it receives (it realloc's
 * and frees it), so dot/norm1/norm2 must not be touched after those calls.
 * NOTE(review): long double is demoted to double in device code, so
 * sizeof(long double) on the host may not match the device element size
 * used by mul_kernel -- confirm on the target platform.
 */
long double cosine_similarity(long double *sampleFft, long double *musicSliceFft, int size) {
long double *d_in1, *d_in2, *d_out;
long double *dot, *norm1, *norm2;
dot = (long double *)malloc(size * sizeof(long double));
norm1 = (long double *)malloc(size * sizeof(long double));
norm2 = (long double *)malloc(size * sizeof(long double));
cudaError_t status;
cudaSetDevice(0);
status = cudaMalloc((void **)&d_in1, size * sizeof(long double));
check_errors(status, "cudaMalloc(in1)");
status = cudaMalloc((void **)&d_in2, size * sizeof(long double));
check_errors(status, "cudaMalloc(in2)");
status = cudaMalloc((void **)&d_out, size * sizeof(long double));
check_errors(status, "cudaMalloc(out)");
status = cudaMemcpy(d_in1, sampleFft, size * sizeof(long double), cudaMemcpyHostToDevice);
check_errors(status, "cudaMemcpy(in1)");
status = cudaMemcpy(d_in2, musicSliceFft, size * sizeof(long double), cudaMemcpyHostToDevice);
check_errors(status, "cudaMemcpy(in2)");
/* Ceil-divide into 1024-thread blocks; small inputs get a single block. */
int n_blocks = ceil((long double)size / 1024.0);
int n_threads = (n_blocks > 1) ? 1024 : size;
/* dot[i] = a[i] * b[i] */
mul_kernel << <n_blocks, n_threads >> > (d_in1, d_in2, d_out, size);
check_errors(cudaGetLastError(), "kernel(in1, in2)");
status = cudaDeviceSynchronize();
check_errors(status, "cudaDeviceSync");
status = cudaMemcpy(dot, d_out, size * sizeof(long double), cudaMemcpyDeviceToHost);
check_errors(status, "cudaMemcpy(dot, d_out)");
/* norm1[i] = a[i]^2 (d_out is reused for each product) */
mul_kernel << <n_blocks, n_threads >> > (d_in1, d_in1, d_out, size);
check_errors(cudaGetLastError(), "kernel(in1, in1)");
status = cudaDeviceSynchronize();
check_errors(status, "cudaDeviceSync");
status = cudaMemcpy(norm1, d_out, size * sizeof(long double), cudaMemcpyDeviceToHost);
check_errors(status, "cudaMemcpy(norm1, d_out)");
/* norm2[i] = b[i]^2 */
mul_kernel << <n_blocks, n_threads >> > (d_in2, d_in2, d_out, size);
check_errors(cudaGetLastError(), "kernel(in2, in2)");
status = cudaDeviceSynchronize();
check_errors(status, "cudaDeviceSync");
status = cudaMemcpy(norm2, d_out, size * sizeof(long double), cudaMemcpyDeviceToHost);
check_errors(status, "cudaMemcpy(norm2, d_out)");
/* reduce() frees its argument -- dot/norm1/norm2 are consumed here. */
long double f_dot = reduce(dot, size);
long double f_norm1 = sqrt(reduce(norm1, size));
long double f_norm2 = sqrt(reduce(norm2, size));
status = cudaFree(d_in1);
check_errors(status, "cudaFree(in1)");
status = cudaFree(d_in2);
check_errors(status, "cudaFree(in2)");
status = cudaFree(d_out);
check_errors(status, "cudaFree(out)");
long double res = (f_dot) / (f_norm1 * f_norm2);
return res;
}
/* Elementwise product: out[i] = in1[i] * in2[i]. */
__global__ void mul_kernel(long double *in1, long double *in2, long double *out, int size) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;  /* tail guard: grid may overshoot `size` */
    out[idx] = in1[idx] * in2[idx];
}
/*
 * GPU tree reduction (sum) of `arr`. Takes ownership of `arr`: the
 * buffer is realloc'd up to the next power of two (zero-padded) and
 * freed before returning. Relies on file-scope globals: d_array
 * (per-level device buffers), level (current depth counter, reset to 0
 * on exit), and blockSize (threads-per-block constant).
 */
long double reduce(long double *arr, int size) {
    /* Round up to a power of two so the reduction tree is regular. */
    int newsize = pow(2, ceil(log(size) / log(2)));
    arr = (long double *)realloc(arr, newsize * sizeof(long double));
    #pragma omp parallel for num_threads(6)
    for (int i = size; i < newsize; i++)
        arr[i] = 0;
    cudaError_t status;
    status = cudaSetDevice(0);
    check_errors(status, "Dev Set");
    /* Allocate one device buffer per reduction level, down to 1 element. */
    int i = newsize;
    int dcount = 0;
    while (i != 0) {
        status = cudaMalloc((void **)&d_array[level], i * sizeof(long double));
        dcount++;
        check_errors(status, "cudaMalloc(d_array[level])");
        if (i == 1) {
            i = 0;
        }
        else {
            i = ((i - 1) / blockSize) + 1;
        }
        level++;
    }
    status = cudaMemcpy(d_array[0], arr, newsize * sizeof(long double), cudaMemcpyHostToDevice);
    check_errors(status, "Memcpy(d_array[0], arr)");
    /* Reduce level by level until a single element remains. */
    int current = newsize;
    int next = ((current - 1) / blockSize) + 1;
    int counter = 0;
    while (current != 1) {
        reduction_kernel << <next, blockSize/2 >> > (d_array[counter], d_array[counter + 1], current);
        check_errors(cudaGetLastError(), "kernel");
        current = next;
        next = ((current - 1) / blockSize) + 1;
        counter++;
    }
    status = cudaDeviceSynchronize();
    check_errors(status, "Dev Sync");
    /* BUGFIX: the result copy-back was previously unchecked. */
    status = cudaMemcpy(arr, d_array[level - 1], sizeof(long double), cudaMemcpyDeviceToHost);
    check_errors(status, "Memcpy(arr, d_array)");
    /* BUGFIX: the cleanup loop declared `j` but iterated on `i`; it only
       worked because `i` happened to be 0 after the allocation loop. */
    for (int j = 0; j < dcount; j++) {
        status = cudaFree(d_array[j]);
        check_errors(status, "cudaFree");
    }
    level = 0;
    /* BUGFIX: the result was stored in a float, silently truncating the
       long double sum before returning it. */
    long double res = arr[0];
    free(arr);
    return res;
}
/* Host-side fallback: sums the first `size` entries of `arr`. */
long double serial_reduce(long double *arr, int size) {
    long double total = 0.0;
    int idx = 0;
    while (idx < size) {
        total += arr[idx];
        idx++;
    }
    return total;
}
/*
 * One level of a block-wise tree sum (launched level-by-level from
 * reduce()). Each thread loads up to two elements of g_idata -- the
 * second load is skipped when i + blockDim.x would pass `size` -- the
 * block reduces them in shared memory, and thread 0 writes the block
 * total to g_odata[blockIdx.x].
 * blockSize is a file-scope constant sizing the shared array.
 * NOTE(review): the unrolled tail reads sdata[tid + 32], which is only
 * initialized when blockDim.x >= 64 -- confirm against the blockSize/2
 * launch configuration used in reduce().
 * NOTE(review): long double is demoted to double in device code --
 * confirm host/device element sizes agree on the target platform.
 */
__global__ void reduction_kernel(long double *g_idata, long double *g_odata, int size)
{
__shared__ long double sdata[blockSize];
// each thread loads one element from global to shared mem
// (two when both i and i + blockDim.x are in range)
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
if (i >= size)
sdata[tid] = 0;
else if (i + blockDim.x >= size)
sdata[tid] = g_idata[i];
else
sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
__syncthreads();
// do reduction in shared mem, halving the active span each pass
// until 64 elements remain
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
{
if (tid < s)
sdata[tid] += sdata[tid + s];
__syncthreads();
}
// final 64 -> 1 inside a single warp; __syncwarp() orders the
// dependent lane updates between steps
if (tid < 32)
{
sdata[tid] += sdata[tid + 32];
__syncwarp();
sdata[tid] += sdata[tid + 16];
__syncwarp();
sdata[tid] += sdata[tid + 8];
__syncwarp();
sdata[tid] += sdata[tid + 4];
__syncwarp();
sdata[tid] += sdata[tid + 2];
__syncwarp();
sdata[tid] += sdata[tid + 1];
__syncwarp();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
 * Cosine similarity of two spectra, computed serially on the host:
 *   dot(a,b) / (||a|| * ||b||).
 */
long double similarity(long double *sampleFft, long double *musicSliceFft, int size) {
    long double dotProd = 0.0;
    long double normA = 0.0;
    long double normB = 0.0;
    for (int i = 0; i < size; i++) {
        long double s = sampleFft[i];
        long double m = musicSliceFft[i];
        dotProd += s * m;
        normA += s * s;
        normB += m * m;
    }
    return dotProd / (sqrt(normA) * sqrt(normB));
}
/* Counts the ".txt" entries inside directory `path`.
 * Returns 0 (after perror) if the directory cannot be opened. */
int number_of_files(char *path) {
    DIR *dir = opendir(path);
    if (dir == NULL) {
        /* could not open directory */
        perror("");
        return 0;
    }
    int count = 0;
    struct dirent *ent;
    while ((ent = readdir(dir)) != NULL) {
        char *ext = strrchr(ent->d_name, '.');
        if (ext != NULL && strcmp(ext, ".txt") == 0)
            count++;
    }
    closedir(dir);
    return count;
}
/*
 * Fills `res` with heap-allocated full paths for every ".txt" entry in
 * directory `path`, in directory-iteration order. `res` must have room
 * for at least number_of_files(path) pointers; each string is owned by
 * the caller. On open failure a perror() message is printed and res is
 * left untouched.
 * NOTE(review): "\\" is a Windows path separator while the iteration
 * uses dirent -- confirm the intended target platform (POSIX needs "/").
 */
void init_lists(char *path, char **res) {
    int i = 0;
    char currentPath[500];   /* assumes path + "\\" + name fits in 500 bytes -- TODO confirm */
    DIR *dir;
    struct dirent *ent;
    char *dot;
    if ((dir = opendir(path)) != NULL) {
        while ((ent = readdir(dir)) != NULL) {
            /* Keep only entries whose extension is exactly ".txt". */
            dot = strrchr(ent->d_name, '.');
            if (dot && !strcmp(dot, ".txt")) {
                strcpy(currentPath, path);
                strcat(currentPath, "\\");
                strcat(currentPath, ent->d_name);
                /* BUGFIX: +1 for the NUL terminator. The previous
                   allocation of strlen(currentPath) bytes made the
                   strcpy below write one byte past the buffer. */
                res[i] = (char *)malloc((strlen(currentPath) + 1) * sizeof(char));
                strcpy(res[i], currentPath);
                i++;
            }
        }
        closedir(dir);
    }
    else {
        /* could not open directory */
        perror("");
    }
}
/* Loads each file in `paths` into the matching Array slot in `dest`,
 * parallelized over 4 OpenMP threads. */
void read_files(char **paths, Array **dest, int size) {
    #pragma omp parallel for num_threads(4)
    for (int i = 0; i < size; i++) {
        read_file(paths[i], dest[i]);
    }
}
|
f44cd22562de939a43ecab392cebeb27398ccfcd.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "hip/hip_runtime.h"
#define OPT2
#ifdef OPT1
/*
 * OPT1 forward pass for one layer: each thread computes one
 * weight * input product, the block tree-reduces the products over the
 * ty dimension in shared memory, and the tx == 0 column writes the
 * per-block partial sums to hidden_partial_sum. HEIGHT/WIDTH/ETA-style
 * macros come from backprop.h; blockDim is presumably WIDTH x HEIGHT --
 * TODO confirm against the host-side launch.
 */
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Flattened weight index; the extra (hid + 1) skips the bias row.
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];   // NOTE(review): declared but never used
__shared__ float weight_matrix[HEIGHT][WIDTH];
weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in];
__syncthreads();
// Tree reduction over ty: stride 2^i per pass. NOTE(review): loop bound
// and stride use the float intrinsics __log2f/__powf -- presumably exact
// for power-of-two HEIGHT, but worth confirming.
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if (ty % power_two == 0)
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
input_hidden_cuda[index] = weight_matrix[ty][tx];
if (tx == 0) {
// Row 0 holds the column sums; since tx == 0, weight_matrix[tx][ty]
// reads weight_matrix[0][ty], i.e. the reduced sum for column ty.
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
#endif
#ifdef OPT2
/*
 * OPT2 forward pass for one layer: each thread computes one
 * weight * input product, the block tree-reduces the products over the
 * ty dimension in shared memory (bitmask test instead of OPT1's modulo),
 * and the tx == 0 column writes the per-block partial sums to
 * hidden_partial_sum. HEIGHT/WIDTH macros come from backprop.h;
 * blockDim is presumably WIDTH x HEIGHT -- TODO confirm against launch.
 */
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
                       float *output_hidden_cuda,
                       float *input_hidden_cuda,
                       float *hidden_partial_sum,
                       int in,
                       int hid)
{
   int by = blockIdx.y;
   int tx = threadIdx.x;
   int ty = threadIdx.y;
   /* Flattened weight index; the extra (hid + 1) skips the bias row. */
   int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
   int index_in = HEIGHT * by + ty + 1;
   __shared__ float weight_matrix[HEIGHT][WIDTH];  /* unused input_node removed */
   weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in];
   __syncthreads();
   /* Tree reduction over ty: (ty & (i-1)) == 0 is equivalent to
      ty % i == 0 for power-of-two i. */
   for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
      if( (ty & (i - 1)) == 0 ) {
         /* BUGFIX: partner row is ty + i/2 (matching OPT1's
            power_two/2 stride). The old code used (i-1)/2, which is 0
            on the first pass -- summing each element with itself --
            and off by one on every later pass, yielding wrong sums. */
         weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + i/2][tx];
      }
      __syncthreads();
   }
   input_hidden_cuda[index] = weight_matrix[ty][tx];
   if ( tx == 0 ) {
      /* Row 0 holds the column sums; since tx == 0,
         weight_matrix[tx][ty] reads the reduced sum for column ty. */
      hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
   }
}
#endif
/*
 * Momentum gradient-descent weight update for one layer:
 *   step = ETA * delta * ly + MOMENTUM * oldw
 *   w += step; oldw = step
 * ETA and MOMENTUM are macros from backprop.h. Both statements below
 * read oldw[index] before it is reassigned, so w and oldw receive the
 * same step value. Thread (ty == 0, by == 0) additionally updates
 * w[1..], presumably the bias row -- TODO confirm that index_x never
 * overlaps the main index range for the configured blockDim.
 */
__global__ void bpnn_adjust_weights_cuda(float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Flattened weight index; the extra (hid + 1) skips the bias row.
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
//eta = 0.3;
//momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0){
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
| f44cd22562de939a43ecab392cebeb27398ccfcd.cu |
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "cuda.h"
#define OPT2
#ifdef OPT1
/*
 * OPT1 forward pass for one layer: each thread computes one
 * weight * input product, the block tree-reduces the products over the
 * ty dimension in shared memory, and the tx == 0 column writes the
 * per-block partial sums to hidden_partial_sum. HEIGHT/WIDTH macros
 * come from backprop.h; blockDim is presumably WIDTH x HEIGHT -- TODO
 * confirm against the host-side launch.
 */
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Flattened weight index; the extra (hid + 1) skips the bias row.
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];   // NOTE(review): declared but never used
__shared__ float weight_matrix[HEIGHT][WIDTH];
weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in];
__syncthreads();
// Tree reduction over ty: stride 2^i per pass. NOTE(review): loop bound
// and stride use the float intrinsics __log2f/__powf -- presumably exact
// for power-of-two HEIGHT, but worth confirming.
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if (ty % power_two == 0)
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
input_hidden_cuda[index] = weight_matrix[ty][tx];
if (tx == 0) {
// Row 0 holds the column sums; since tx == 0, weight_matrix[tx][ty]
// reads weight_matrix[0][ty], i.e. the reduced sum for column ty.
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
#endif
#ifdef OPT2
/*
 * OPT2 forward pass for one layer: each thread computes one
 * weight * input product, the block tree-reduces the products over the
 * ty dimension in shared memory (bitmask test instead of OPT1's modulo),
 * and the tx == 0 column writes the per-block partial sums to
 * hidden_partial_sum. HEIGHT/WIDTH macros come from backprop.h;
 * blockDim is presumably WIDTH x HEIGHT -- TODO confirm against launch.
 */
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
                       float *output_hidden_cuda,
                       float *input_hidden_cuda,
                       float *hidden_partial_sum,
                       int in,
                       int hid)
{
   int by = blockIdx.y;
   int tx = threadIdx.x;
   int ty = threadIdx.y;
   /* Flattened weight index; the extra (hid + 1) skips the bias row. */
   int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
   int index_in = HEIGHT * by + ty + 1;
   __shared__ float weight_matrix[HEIGHT][WIDTH];  /* unused input_node removed */
   weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in];
   __syncthreads();
   /* Tree reduction over ty: (ty & (i-1)) == 0 is equivalent to
      ty % i == 0 for power-of-two i. */
   for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
      if( (ty & (i - 1)) == 0 ) {
         /* BUGFIX: partner row is ty + i/2 (matching OPT1's
            power_two/2 stride). The old code used (i-1)/2, which is 0
            on the first pass -- summing each element with itself --
            and off by one on every later pass, yielding wrong sums. */
         weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + i/2][tx];
      }
      __syncthreads();
   }
   input_hidden_cuda[index] = weight_matrix[ty][tx];
   if ( tx == 0 ) {
      /* Row 0 holds the column sums; since tx == 0,
         weight_matrix[tx][ty] reads the reduced sum for column ty. */
      hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
   }
}
#endif
/*
 * Momentum gradient-descent weight update for one layer:
 *   step = ETA * delta * ly + MOMENTUM * oldw
 *   w += step; oldw = step
 * ETA and MOMENTUM are macros from backprop.h. Both statements below
 * read oldw[index] before it is reassigned, so w and oldw receive the
 * same step value. Thread (ty == 0, by == 0) additionally updates
 * w[1..], presumably the bias row -- TODO confirm that index_x never
 * overlaps the main index range for the configured blockDim.
 */
__global__ void bpnn_adjust_weights_cuda(float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Flattened weight index; the extra (hid + 1) skips the bias row.
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
//eta = 0.3;
//momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0){
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
|
8cfa417b1950fe12102846b38bcdfa8399c0d4d8.hip | // !!! This is a file automatically generated by hipify!!!
/*
Compute t-SNE via Barnes-Hut for NlogN time.
*/
#include "include/fit_tsne.h"
#include <chrono>
#define START_IL_TIMER() start = std::chrono::high_resolution_clock::now();
#define END_IL_TIMER(x) stop = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); x += duration; total_time += duration;
#define PRINT_IL_TIMER(x) std::cout << #x << ": " << ((float) x.count()) / 1000000.0 << "s" << std::endl
void tsnecuda::RunTsne(tsnecuda::Options &opt,
tsnecuda::GpuOptions &gpu_opt)
{
auto start = std::chrono::high_resolution_clock::now();
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
auto total_time = duration;
auto _time_initialization = duration;
auto _time_knn = duration;
auto _time_symmetry = duration;
auto _time_init_low_dim = duration;
auto _time_init_fft = duration;
auto _time_precompute_2d = duration;
auto _time_nbodyfft = duration;
auto _time_compute_charges = duration;
auto _time_other = duration;
auto _time_norm = duration;
auto _time_attr = duration;
auto _time_apply_forces = duration;
// Check the validity of the options file
if (!opt.validate()) {
std::cout << "E: Invalid options file. Terminating." << std::endl;
return;
}
START_IL_TIMER();
// Construct the handles
hipblasHandle_t dense_handle;
CublasSafeCall(hipblasCreate(&dense_handle));
hipsparseHandle_t sparse_handle;
CusparseSafeCall(hipsparseCreate(&sparse_handle));
// Set CUDA device properties
const int num_blocks = gpu_opt.sm_count;
// Construct sparse matrix descriptor
hipsparseMatDescr_t sparse_matrix_descriptor;
hipsparseCreateMatDescr(&sparse_matrix_descriptor);
hipsparseSetMatType(sparse_matrix_descriptor, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(sparse_matrix_descriptor,HIPSPARSE_INDEX_BASE_ZERO);
// Setup some return information if we're working on snapshots
int snap_num = 0;
int snap_interval = 1;
if (opt.return_style == tsnecuda::RETURN_STYLE::SNAPSHOT) {
snap_interval = opt.iterations / (opt.num_snapshots - 1);
}
// Get constants from options
const int num_points = opt.num_points;
const int num_neighbors = (opt.num_neighbors < num_points) ? opt.num_neighbors : num_points;
const float *high_dim_points = opt.points;
const int high_dim = opt.num_dims;
const float perplexity = opt.perplexity;
const float perplexity_search_epsilon = opt.perplexity_search_epsilon;
const float eta = opt.learning_rate;
float momentum = opt.pre_exaggeration_momentum;
float attr_exaggeration = opt.early_exaggeration;
float normalization;
// Allocate host memory
float *knn_squared_distances = new float[num_points * num_neighbors];
memset(knn_squared_distances, 0, num_points * num_neighbors * sizeof(float));
long *knn_indices = new long[num_points * num_neighbors];
// Set cache configs
hipFuncSetCacheConfig(tsnecuda::IntegrationKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(tsnecuda::ComputePijxQijKernel, hipFuncCachePreferShared);
GpuErrorCheck(hipDeviceSynchronize());
END_IL_TIMER(_time_initialization);
START_IL_TIMER();
// Compute approximate K Nearest Neighbors and squared distances
tsnecuda::util::KNearestNeighbors(gpu_opt, knn_indices, knn_squared_distances, high_dim_points, high_dim, num_points, num_neighbors);
thrust::device_vector<long> knn_indices_long_device(knn_indices, knn_indices + num_points * num_neighbors);
thrust::device_vector<int> knn_indices_device(num_points * num_neighbors);
tsnecuda::util::PostprocessNeighborIndices(gpu_opt, knn_indices_device, knn_indices_long_device,
num_points, num_neighbors);
// Max-norm the distances to avoid exponentiating by large numbers
thrust::device_vector<float> knn_squared_distances_device(knn_squared_distances,
knn_squared_distances + (num_points * num_neighbors));
tsnecuda::util::MaxNormalizeDeviceVector(knn_squared_distances_device);
END_IL_TIMER(_time_knn);
START_IL_TIMER();
// Search Perplexity
thrust::device_vector<float> pij_non_symmetric_device(num_points * num_neighbors);
tsnecuda::SearchPerplexity(gpu_opt, dense_handle, pij_non_symmetric_device, knn_squared_distances_device,
perplexity, perplexity_search_epsilon, num_points, num_neighbors);
// Clean up memory
knn_squared_distances_device.clear();
knn_squared_distances_device.shrink_to_fit();
knn_indices_long_device.clear();
knn_indices_long_device.shrink_to_fit();
delete[] knn_squared_distances;
delete[] knn_indices;
// Symmetrize the pij matrix
thrust::device_vector<float> sparse_pij_device;
thrust::device_vector<int> pij_row_ptr_device;
thrust::device_vector<int> pij_col_ind_device;
tsnecuda::util::SymmetrizeMatrix(sparse_handle, sparse_pij_device, pij_row_ptr_device,
pij_col_ind_device, pij_non_symmetric_device, knn_indices_device,
opt.magnitude_factor, num_points, num_neighbors);
const int num_nonzero = sparse_pij_device.size();
// Clean up memory
knn_indices_device.clear();
knn_indices_device.shrink_to_fit();
pij_non_symmetric_device.clear();
pij_non_symmetric_device.shrink_to_fit();
// Declare memory
thrust::device_vector<float> repulsive_forces_device(opt.num_points * 2, 0);
thrust::device_vector<float> attractive_forces_device(opt.num_points * 2, 0);
thrust::device_vector<float> gains_device(opt.num_points * 2, 1);
thrust::device_vector<float> old_forces_device(opt.num_points * 2, 0); // for momentum
thrust::device_vector<float> normalization_vec_device(opt.num_points);
thrust::device_vector<float> ones_device(opt.num_points * 2, 1); // This is for reduce summing, etc.
thrust::device_vector<int> coo_indices_device(sparse_pij_device.size()*2);
tsnecuda::util::Csr2Coo(gpu_opt, coo_indices_device, pij_row_ptr_device,
pij_col_ind_device, num_points, num_nonzero);
END_IL_TIMER(_time_symmetry);
START_IL_TIMER();
// Initialize Low-Dim Points
thrust::device_vector<float> points_device(num_points * 2);
thrust::device_vector<float> random_vector_device(points_device.size());
std::default_random_engine generator(opt.random_seed);
std::normal_distribution<float> distribution1(0.0, 1.0);
thrust::host_vector<float> h_points_device(num_points * 2);
// Initialize random noise vector
for (int i = 0; i < h_points_device.size(); i++) h_points_device[i] = 0.001 * distribution1(generator);
thrust::copy(h_points_device.begin(), h_points_device.end(), random_vector_device.begin());
// TODO: this will only work with gaussian init
if (opt.initialization == tsnecuda::TSNE_INIT::UNIFORM) { // Random uniform initialization
points_device = tsnecuda::util::RandomDeviceVectorInRange(generator, points_device.size(), -5, 5);
} else if (opt.initialization == tsnecuda::TSNE_INIT::GAUSSIAN) { // Random gaussian initialization
// Generate some Gaussian noise for the points
for (int i = 0; i < h_points_device.size(); i++) h_points_device[i] = 0.0001 * distribution1(generator);
thrust::copy(h_points_device.begin(), h_points_device.end(), points_device.begin());
} else if (opt.initialization == tsnecuda::TSNE_INIT::RESUME) { // Preinit from vector
// Load from vector
if(opt.preinit_data != nullptr) {
thrust::copy(opt.preinit_data, opt.preinit_data + points_device.size(), points_device.begin());
} else {
std::cerr << "E: Invalid initialization. Initialization points are null." << std::endl;
exit(1);
}
} else if (opt.initialization == tsnecuda::TSNE_INIT::VECTOR) { // Preinit from vector points only
// Copy the pre-init data
if(opt.preinit_data != nullptr) {
thrust::copy(opt.preinit_data, opt.preinit_data + points_device.size(), points_device.begin());
} else {
std::cerr << "E: Invalid initialization. Initialization points are null." << std::endl;
exit(1);
}
} else { // Invalid initialization
std::cerr << "E: Invalid initialization type specified." << std::endl;
exit(1);
}
END_IL_TIMER(_time_init_low_dim);
START_IL_TIMER();
// FIT-TNSE Parameters
int n_interpolation_points = 3;
// float intervals_per_integer = 1;
int min_num_intervals = 50;
int N = num_points;
// int D = 2;
// The number of "charges" or s+2 sums i.e. number of kernel sums
int n_terms = 4;
int n_boxes_per_dim = min_num_intervals;
// FFTW works faster on numbers that can be written as 2^a 3^b 5^c 7^d
// 11^e 13^f, where e+f is either 0 or 1, and the other exponents are
// arbitrary
int allowed_n_boxes_per_dim[20] = {25,36, 50, 55, 60, 65, 70, 75, 80, 85, 90, 96, 100, 110, 120, 130, 140,150, 175, 200};
if ( n_boxes_per_dim < allowed_n_boxes_per_dim[19] ) {
//Round up to nearest grid point
int chosen_i;
for (chosen_i =0; allowed_n_boxes_per_dim[chosen_i]< n_boxes_per_dim; chosen_i++);
n_boxes_per_dim = allowed_n_boxes_per_dim[chosen_i];
}
int n_total_boxes = n_boxes_per_dim * n_boxes_per_dim;
int total_interpolation_points = n_total_boxes * n_interpolation_points * n_interpolation_points;
int n_fft_coeffs_half = n_interpolation_points * n_boxes_per_dim;
int n_fft_coeffs = 2 * n_interpolation_points * n_boxes_per_dim;
int n_interpolation_points_1d = n_interpolation_points * n_boxes_per_dim;
// FIT-TSNE Device Vectors
thrust::device_vector<int> point_box_idx_device(N);
thrust::device_vector<float> x_in_box_device(N);
thrust::device_vector<float> y_in_box_device(N);
thrust::device_vector<float> y_tilde_values(total_interpolation_points * n_terms);
thrust::device_vector<float> x_interpolated_values_device(N * n_interpolation_points);
thrust::device_vector<float> y_interpolated_values_device(N * n_interpolation_points);
thrust::device_vector<float> potentialsQij_device(N * n_terms);
thrust::device_vector<float> w_coefficients_device(total_interpolation_points * n_terms);
thrust::device_vector<float> all_interpolated_values_device(
n_terms * n_interpolation_points * n_interpolation_points * N);
thrust::device_vector<float> output_values(
n_terms * n_interpolation_points * n_interpolation_points * N);
thrust::device_vector<int> all_interpolated_indices(
n_terms * n_interpolation_points * n_interpolation_points * N);
thrust::device_vector<int> output_indices(
n_terms * n_interpolation_points * n_interpolation_points * N);
thrust::device_vector<float> chargesQij_device(N * n_terms);
thrust::device_vector<float> box_lower_bounds_device(2 * n_total_boxes);
thrust::device_vector<float> box_upper_bounds_device(2 * n_total_boxes);
thrust::device_vector<float> kernel_tilde_device(n_fft_coeffs * n_fft_coeffs);
thrust::device_vector<thrust::complex<float>> fft_kernel_tilde_device(2 * n_interpolation_points_1d * 2 * n_interpolation_points_1d);
thrust::device_vector<float> fft_input(n_terms * n_fft_coeffs * n_fft_coeffs);
thrust::device_vector<thrust::complex<float>> fft_w_coefficients(n_terms * n_fft_coeffs * (n_fft_coeffs / 2 + 1));
thrust::device_vector<float> fft_output(n_terms * n_fft_coeffs * n_fft_coeffs);
// Easier to compute denominator on CPU, so we should just calculate y_tilde_spacing on CPU also
float h = 1 / (float) n_interpolation_points;
float y_tilde_spacings[n_interpolation_points];
y_tilde_spacings[0] = h / 2;
for (int i = 1; i < n_interpolation_points; i++) {
y_tilde_spacings[i] = y_tilde_spacings[i - 1] + h;
}
float denominator[n_interpolation_points];
for (int i = 0; i < n_interpolation_points; i++) {
denominator[i] = 1;
for (int j = 0; j < n_interpolation_points; j++) {
if (i != j) {
denominator[i] *= y_tilde_spacings[i] - y_tilde_spacings[j];
}
}
}
thrust::device_vector<float> y_tilde_spacings_device(y_tilde_spacings, y_tilde_spacings + n_interpolation_points);
thrust::device_vector<float> denominator_device(denominator, denominator + n_interpolation_points);
// Create the FFT Handles
hipfftHandle plan_kernel_tilde, plan_dft, plan_idft;;
CufftSafeCall(hipfftCreate(&plan_kernel_tilde));
CufftSafeCall(hipfftCreate(&plan_dft));
CufftSafeCall(hipfftCreate(&plan_idft));
size_t work_size, work_size_dft, work_size_idft;
int fft_dimensions[2] = {n_fft_coeffs, n_fft_coeffs};
CufftSafeCall(hipfftMakePlan2d(plan_kernel_tilde, fft_dimensions[0], fft_dimensions[1], HIPFFT_R2C, &work_size));
CufftSafeCall(hipfftMakePlanMany(plan_dft, 2, fft_dimensions,
NULL, 1, n_fft_coeffs * n_fft_coeffs,
NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1),
HIPFFT_R2C, n_terms, &work_size_dft));
CufftSafeCall(hipfftMakePlanMany(plan_idft, 2, fft_dimensions,
NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1),
NULL, 1, n_fft_coeffs * n_fft_coeffs,
HIPFFT_C2R, n_terms, &work_size_idft));
// Dump file
float *host_ys = nullptr;
std::ofstream dump_file;
if (opt.get_dump_points()) {
dump_file.open(opt.get_dump_file());
host_ys = new float[num_points * 2];
dump_file << num_points << " " << 2 << std::endl;
}
#ifndef NO_ZMQ
bool send_zmq = opt.get_use_interactive();
zmq::context_t context(1);
zmq::socket_t publisher(context, ZMQ_REQ);
if (opt.get_use_interactive()) {
// Try to connect to the socket
if (opt.verbosity >= 1)
std::cout << "Initializing Connection...." << std::endl;
publisher.setsockopt(ZMQ_RCVTIMEO, opt.get_viz_timeout());
publisher.setsockopt(ZMQ_SNDTIMEO, opt.get_viz_timeout());
if (opt.verbosity >= 1)
std::cout << "Waiting for connection to visualization for 10 secs...." << std::endl;
publisher.connect(opt.get_viz_server());
// Send the number of points we should be expecting to the server
std::string message = std::to_string(opt.num_points);
send_zmq = publisher.send(message.c_str(), message.length());
// Wait for server reply
zmq::message_t request;
send_zmq = publisher.recv (&request);
// If there's a time-out, don't bother.
if (send_zmq) {
if (opt.verbosity >= 1)
std::cout << "Visualization connected!" << std::endl;
} else {
std::cout << "No Visualization Terminal, continuing..." << std::endl;
send_zmq = false;
}
}
#else
if (opt.get_use_interactive())
std::cout << "This version is not built with ZMQ for interative viz. Rebuild with WITH_ZMQ=TRUE for viz." << std::endl;
#endif
END_IL_TIMER(_time_init_fft);
// Support for infinite iteration
for (size_t step = 0; step != opt.iterations; step++) {
START_IL_TIMER();
float fill_value = 0;
thrust::fill(w_coefficients_device.begin(), w_coefficients_device.end(), fill_value);
thrust::fill(potentialsQij_device.begin(), potentialsQij_device.end(), fill_value);
// Setup learning rate schedule
if (step == opt.force_magnify_iters) {
momentum = opt.post_exaggeration_momentum;
attr_exaggeration = 1.0f;
}
END_IL_TIMER(_time_other);
// Prepare the terms that we'll use to compute the sum i.e. the repulsive forces
START_IL_TIMER();
tsnecuda::ComputeChargesQij(chargesQij_device, points_device, num_points, num_points - 1, n_terms);
END_IL_TIMER(_time_compute_charges);
// Compute Minimax elements
START_IL_TIMER();
auto minimax_iter = thrust::minmax_element(points_device.begin(), points_device.end());
float min_coord = minimax_iter.first[0];
float max_coord = minimax_iter.second[0];
// Compute the number of boxes in a single dimension and the total number of boxes in 2d
// auto n_boxes_per_dim = static_cast<int>(fmax(min_num_intervals, (max_coord - min_coord) / intervals_per_integer));
tsnecuda::PrecomputeFFT2D(
plan_kernel_tilde, max_coord, min_coord, max_coord, min_coord, n_boxes_per_dim, n_interpolation_points,
box_lower_bounds_device, box_upper_bounds_device, kernel_tilde_device,
fft_kernel_tilde_device);
float box_width = ((max_coord - min_coord) / (float) n_boxes_per_dim);
END_IL_TIMER(_time_precompute_2d);
START_IL_TIMER();
tsnecuda::NbodyFFT2D(
plan_dft, plan_idft,
N, n_terms, n_boxes_per_dim, n_interpolation_points,
fft_kernel_tilde_device, n_total_boxes,
total_interpolation_points, min_coord, box_width, n_fft_coeffs_half, n_fft_coeffs, num_points - 1,
fft_input, fft_w_coefficients, fft_output,
point_box_idx_device, x_in_box_device, y_in_box_device, points_device,
box_lower_bounds_device, y_tilde_spacings_device, denominator_device, y_tilde_values,
all_interpolated_values_device, output_values, all_interpolated_indices,
output_indices, w_coefficients_device, chargesQij_device, x_interpolated_values_device,
y_interpolated_values_device, potentialsQij_device);
END_IL_TIMER(_time_nbodyfft);
START_IL_TIMER();
// Make the negative term, or F_rep in the equation 3 of the paper
normalization = tsnecuda::ComputeRepulsiveForces(
repulsive_forces_device, normalization_vec_device, points_device,
potentialsQij_device, num_points, num_points - 1, n_terms);
END_IL_TIMER(_time_norm);
START_IL_TIMER();
// Calculate Attractive Forces
tsnecuda::ComputeAttractiveForces(gpu_opt,
sparse_handle,
sparse_matrix_descriptor,
attractive_forces_device,
sparse_pij_device,
pij_row_ptr_device,
pij_col_ind_device,
coo_indices_device,
points_device,
ones_device,
num_points - 1,
num_points,
num_nonzero);
END_IL_TIMER(_time_attr);
START_IL_TIMER();
// Apply Forces
tsnecuda::ApplyForces(gpu_opt,
points_device,
attractive_forces_device,
repulsive_forces_device,
gains_device,
old_forces_device,
eta,
normalization,
momentum,
attr_exaggeration,
num_points - 1,
num_points,
num_blocks);
// // Compute the gradient norm
tsnecuda::util::SquareDeviceVector(attractive_forces_device, old_forces_device);
thrust::transform(attractive_forces_device.begin(), attractive_forces_device.begin()+num_points,
attractive_forces_device.begin()+num_points, attractive_forces_device.begin(),
thrust::plus<float>());
tsnecuda::util::SqrtDeviceVector(attractive_forces_device, attractive_forces_device);
float grad_norm = thrust::reduce(
attractive_forces_device.begin(), attractive_forces_device.begin() + num_points,
0.0f, thrust::plus<float>()) / num_points;
thrust::fill(attractive_forces_device.begin(), attractive_forces_device.end(), 0.0f);
if (grad_norm < opt.min_gradient_norm) {
if (opt.verbosity >= 1) std::cout << "Reached minimum gradient norm: " << grad_norm << std::endl;
break;
}
if (opt.verbosity >= 1 && step % opt.print_interval == 0) {
std::cout << "[Step " << step << "] Avg. Gradient Norm: " << grad_norm << std::endl;
}
END_IL_TIMER(_time_apply_forces);
#ifndef NO_ZMQ
if (send_zmq) {
zmq::message_t message(sizeof(float)*opt.num_points*2);
thrust::copy(points_device.begin(), points_device.end(), static_cast<float*>(message.data()));
bool res = false;
res = publisher.send(message);
zmq::message_t request;
res = publisher.recv(&request);
if (!res) {
std::cout << "Server Disconnected, Not sending anymore for this session." << std::endl;
}
send_zmq = res;
}
#endif
if (opt.get_dump_points() && step % opt.get_dump_interval() == 0) {
thrust::copy(points_device.begin(), points_device.end(), host_ys);
for (int i = 0; i < opt.num_points; i++) {
dump_file << host_ys[i] << " " << host_ys[i + num_points] << std::endl;
}
}
// // Handle snapshoting
if (opt.return_style == tsnecuda::RETURN_STYLE::SNAPSHOT && step % snap_interval == 0 && opt.return_data != nullptr) {
thrust::copy(points_device.begin(),
points_device.end(),
snap_num*opt.num_points*2 + opt.return_data);
snap_num += 1;
}
}
CufftSafeCall(hipfftDestroy(plan_kernel_tilde));
CufftSafeCall(hipfftDestroy(plan_dft));
CufftSafeCall(hipfftDestroy(plan_idft));
if (opt.verbosity > 0) {
PRINT_IL_TIMER(_time_initialization);
PRINT_IL_TIMER(_time_knn);
PRINT_IL_TIMER(_time_symmetry);
PRINT_IL_TIMER(_time_init_low_dim);
PRINT_IL_TIMER(_time_init_fft);
PRINT_IL_TIMER(_time_compute_charges);
PRINT_IL_TIMER(_time_precompute_2d);
PRINT_IL_TIMER(_time_nbodyfft);
PRINT_IL_TIMER(_time_norm);
PRINT_IL_TIMER(_time_attr);
PRINT_IL_TIMER(_time_apply_forces);
PRINT_IL_TIMER(_time_other);
PRINT_IL_TIMER(total_time);
}
// Clean up the dump file if we are dumping points
if (opt.get_dump_points()){
delete[] host_ys;
dump_file.close();
}
// Handle a once off return type
if (opt.return_style == tsnecuda::RETURN_STYLE::ONCE && opt.return_data != nullptr) {
thrust::copy(points_device.begin(), points_device.end(), opt.return_data);
}
// Handle snapshoting
if (opt.return_style == tsnecuda::RETURN_STYLE::SNAPSHOT && opt.return_data != nullptr) {
thrust::copy(points_device.begin(), points_device.end(), snap_num*opt.num_points*2 + opt.return_data);
}
// Return some final values
opt.trained = true;
opt.trained_norm = normalization;
return;
}
| 8cfa417b1950fe12102846b38bcdfa8399c0d4d8.cu | /*
Compute t-SNE via Barnes-Hut for NlogN time.
*/
#include "include/fit_tsne.h"
#include <chrono>
#define START_IL_TIMER() start = std::chrono::high_resolution_clock::now();
#define END_IL_TIMER(x) stop = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); x += duration; total_time += duration;
#define PRINT_IL_TIMER(x) std::cout << #x << ": " << ((float) x.count()) / 1000000.0 << "s" << std::endl
void tsnecuda::RunTsne(tsnecuda::Options &opt,
tsnecuda::GpuOptions &gpu_opt)
{
auto start = std::chrono::high_resolution_clock::now();
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
auto total_time = duration;
auto _time_initialization = duration;
auto _time_knn = duration;
auto _time_symmetry = duration;
auto _time_init_low_dim = duration;
auto _time_init_fft = duration;
auto _time_precompute_2d = duration;
auto _time_nbodyfft = duration;
auto _time_compute_charges = duration;
auto _time_other = duration;
auto _time_norm = duration;
auto _time_attr = duration;
auto _time_apply_forces = duration;
// Check the validity of the options file
if (!opt.validate()) {
std::cout << "E: Invalid options file. Terminating." << std::endl;
return;
}
START_IL_TIMER();
// Construct the handles
cublasHandle_t dense_handle;
CublasSafeCall(cublasCreate(&dense_handle));
cusparseHandle_t sparse_handle;
CusparseSafeCall(cusparseCreate(&sparse_handle));
// Set CUDA device properties
const int num_blocks = gpu_opt.sm_count;
// Construct sparse matrix descriptor
cusparseMatDescr_t sparse_matrix_descriptor;
cusparseCreateMatDescr(&sparse_matrix_descriptor);
cusparseSetMatType(sparse_matrix_descriptor, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(sparse_matrix_descriptor,CUSPARSE_INDEX_BASE_ZERO);
// Setup some return information if we're working on snapshots
int snap_num = 0;
int snap_interval = 1;
if (opt.return_style == tsnecuda::RETURN_STYLE::SNAPSHOT) {
snap_interval = opt.iterations / (opt.num_snapshots - 1);
}
// Get constants from options
const int num_points = opt.num_points;
const int num_neighbors = (opt.num_neighbors < num_points) ? opt.num_neighbors : num_points;
const float *high_dim_points = opt.points;
const int high_dim = opt.num_dims;
const float perplexity = opt.perplexity;
const float perplexity_search_epsilon = opt.perplexity_search_epsilon;
const float eta = opt.learning_rate;
float momentum = opt.pre_exaggeration_momentum;
float attr_exaggeration = opt.early_exaggeration;
float normalization;
// Allocate host memory
float *knn_squared_distances = new float[num_points * num_neighbors];
memset(knn_squared_distances, 0, num_points * num_neighbors * sizeof(float));
long *knn_indices = new long[num_points * num_neighbors];
// Set cache configs
cudaFuncSetCacheConfig(tsnecuda::IntegrationKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(tsnecuda::ComputePijxQijKernel, cudaFuncCachePreferShared);
GpuErrorCheck(cudaDeviceSynchronize());
END_IL_TIMER(_time_initialization);
START_IL_TIMER();
// Compute approximate K Nearest Neighbors and squared distances
tsnecuda::util::KNearestNeighbors(gpu_opt, knn_indices, knn_squared_distances, high_dim_points, high_dim, num_points, num_neighbors);
thrust::device_vector<long> knn_indices_long_device(knn_indices, knn_indices + num_points * num_neighbors);
thrust::device_vector<int> knn_indices_device(num_points * num_neighbors);
tsnecuda::util::PostprocessNeighborIndices(gpu_opt, knn_indices_device, knn_indices_long_device,
num_points, num_neighbors);
// Max-norm the distances to avoid exponentiating by large numbers
thrust::device_vector<float> knn_squared_distances_device(knn_squared_distances,
knn_squared_distances + (num_points * num_neighbors));
tsnecuda::util::MaxNormalizeDeviceVector(knn_squared_distances_device);
END_IL_TIMER(_time_knn);
START_IL_TIMER();
// Search Perplexity
thrust::device_vector<float> pij_non_symmetric_device(num_points * num_neighbors);
tsnecuda::SearchPerplexity(gpu_opt, dense_handle, pij_non_symmetric_device, knn_squared_distances_device,
perplexity, perplexity_search_epsilon, num_points, num_neighbors);
// Clean up memory
knn_squared_distances_device.clear();
knn_squared_distances_device.shrink_to_fit();
knn_indices_long_device.clear();
knn_indices_long_device.shrink_to_fit();
delete[] knn_squared_distances;
delete[] knn_indices;
// Symmetrize the pij matrix
thrust::device_vector<float> sparse_pij_device;
thrust::device_vector<int> pij_row_ptr_device;
thrust::device_vector<int> pij_col_ind_device;
tsnecuda::util::SymmetrizeMatrix(sparse_handle, sparse_pij_device, pij_row_ptr_device,
pij_col_ind_device, pij_non_symmetric_device, knn_indices_device,
opt.magnitude_factor, num_points, num_neighbors);
const int num_nonzero = sparse_pij_device.size();
// Clean up memory
knn_indices_device.clear();
knn_indices_device.shrink_to_fit();
pij_non_symmetric_device.clear();
pij_non_symmetric_device.shrink_to_fit();
// Declare memory
thrust::device_vector<float> repulsive_forces_device(opt.num_points * 2, 0);
thrust::device_vector<float> attractive_forces_device(opt.num_points * 2, 0);
thrust::device_vector<float> gains_device(opt.num_points * 2, 1);
thrust::device_vector<float> old_forces_device(opt.num_points * 2, 0); // for momentum
thrust::device_vector<float> normalization_vec_device(opt.num_points);
thrust::device_vector<float> ones_device(opt.num_points * 2, 1); // This is for reduce summing, etc.
thrust::device_vector<int> coo_indices_device(sparse_pij_device.size()*2);
tsnecuda::util::Csr2Coo(gpu_opt, coo_indices_device, pij_row_ptr_device,
pij_col_ind_device, num_points, num_nonzero);
END_IL_TIMER(_time_symmetry);
START_IL_TIMER();
// Initialize Low-Dim Points
thrust::device_vector<float> points_device(num_points * 2);
thrust::device_vector<float> random_vector_device(points_device.size());
std::default_random_engine generator(opt.random_seed);
std::normal_distribution<float> distribution1(0.0, 1.0);
thrust::host_vector<float> h_points_device(num_points * 2);
// Initialize random noise vector
for (int i = 0; i < h_points_device.size(); i++) h_points_device[i] = 0.001 * distribution1(generator);
thrust::copy(h_points_device.begin(), h_points_device.end(), random_vector_device.begin());
// TODO: this will only work with gaussian init
if (opt.initialization == tsnecuda::TSNE_INIT::UNIFORM) { // Random uniform initialization
points_device = tsnecuda::util::RandomDeviceVectorInRange(generator, points_device.size(), -5, 5);
} else if (opt.initialization == tsnecuda::TSNE_INIT::GAUSSIAN) { // Random gaussian initialization
// Generate some Gaussian noise for the points
for (int i = 0; i < h_points_device.size(); i++) h_points_device[i] = 0.0001 * distribution1(generator);
thrust::copy(h_points_device.begin(), h_points_device.end(), points_device.begin());
} else if (opt.initialization == tsnecuda::TSNE_INIT::RESUME) { // Preinit from vector
// Load from vector
if(opt.preinit_data != nullptr) {
thrust::copy(opt.preinit_data, opt.preinit_data + points_device.size(), points_device.begin());
} else {
std::cerr << "E: Invalid initialization. Initialization points are null." << std::endl;
exit(1);
}
} else if (opt.initialization == tsnecuda::TSNE_INIT::VECTOR) { // Preinit from vector points only
// Copy the pre-init data
if(opt.preinit_data != nullptr) {
thrust::copy(opt.preinit_data, opt.preinit_data + points_device.size(), points_device.begin());
} else {
std::cerr << "E: Invalid initialization. Initialization points are null." << std::endl;
exit(1);
}
} else { // Invalid initialization
std::cerr << "E: Invalid initialization type specified." << std::endl;
exit(1);
}
END_IL_TIMER(_time_init_low_dim);
START_IL_TIMER();
// FIT-TNSE Parameters
int n_interpolation_points = 3;
// float intervals_per_integer = 1;
int min_num_intervals = 50;
int N = num_points;
// int D = 2;
// The number of "charges" or s+2 sums i.e. number of kernel sums
int n_terms = 4;
int n_boxes_per_dim = min_num_intervals;
// FFTW works faster on numbers that can be written as 2^a 3^b 5^c 7^d
// 11^e 13^f, where e+f is either 0 or 1, and the other exponents are
// arbitrary
int allowed_n_boxes_per_dim[20] = {25,36, 50, 55, 60, 65, 70, 75, 80, 85, 90, 96, 100, 110, 120, 130, 140,150, 175, 200};
if ( n_boxes_per_dim < allowed_n_boxes_per_dim[19] ) {
//Round up to nearest grid point
int chosen_i;
for (chosen_i =0; allowed_n_boxes_per_dim[chosen_i]< n_boxes_per_dim; chosen_i++);
n_boxes_per_dim = allowed_n_boxes_per_dim[chosen_i];
}
int n_total_boxes = n_boxes_per_dim * n_boxes_per_dim;
int total_interpolation_points = n_total_boxes * n_interpolation_points * n_interpolation_points;
int n_fft_coeffs_half = n_interpolation_points * n_boxes_per_dim;
int n_fft_coeffs = 2 * n_interpolation_points * n_boxes_per_dim;
int n_interpolation_points_1d = n_interpolation_points * n_boxes_per_dim;
// FIT-TSNE Device Vectors
thrust::device_vector<int> point_box_idx_device(N);
thrust::device_vector<float> x_in_box_device(N);
thrust::device_vector<float> y_in_box_device(N);
thrust::device_vector<float> y_tilde_values(total_interpolation_points * n_terms);
thrust::device_vector<float> x_interpolated_values_device(N * n_interpolation_points);
thrust::device_vector<float> y_interpolated_values_device(N * n_interpolation_points);
thrust::device_vector<float> potentialsQij_device(N * n_terms);
thrust::device_vector<float> w_coefficients_device(total_interpolation_points * n_terms);
thrust::device_vector<float> all_interpolated_values_device(
n_terms * n_interpolation_points * n_interpolation_points * N);
thrust::device_vector<float> output_values(
n_terms * n_interpolation_points * n_interpolation_points * N);
thrust::device_vector<int> all_interpolated_indices(
n_terms * n_interpolation_points * n_interpolation_points * N);
thrust::device_vector<int> output_indices(
n_terms * n_interpolation_points * n_interpolation_points * N);
thrust::device_vector<float> chargesQij_device(N * n_terms);
thrust::device_vector<float> box_lower_bounds_device(2 * n_total_boxes);
thrust::device_vector<float> box_upper_bounds_device(2 * n_total_boxes);
thrust::device_vector<float> kernel_tilde_device(n_fft_coeffs * n_fft_coeffs);
thrust::device_vector<thrust::complex<float>> fft_kernel_tilde_device(2 * n_interpolation_points_1d * 2 * n_interpolation_points_1d);
thrust::device_vector<float> fft_input(n_terms * n_fft_coeffs * n_fft_coeffs);
thrust::device_vector<thrust::complex<float>> fft_w_coefficients(n_terms * n_fft_coeffs * (n_fft_coeffs / 2 + 1));
thrust::device_vector<float> fft_output(n_terms * n_fft_coeffs * n_fft_coeffs);
// Easier to compute denominator on CPU, so we should just calculate y_tilde_spacing on CPU also
float h = 1 / (float) n_interpolation_points;
float y_tilde_spacings[n_interpolation_points];
y_tilde_spacings[0] = h / 2;
for (int i = 1; i < n_interpolation_points; i++) {
y_tilde_spacings[i] = y_tilde_spacings[i - 1] + h;
}
float denominator[n_interpolation_points];
for (int i = 0; i < n_interpolation_points; i++) {
denominator[i] = 1;
for (int j = 0; j < n_interpolation_points; j++) {
if (i != j) {
denominator[i] *= y_tilde_spacings[i] - y_tilde_spacings[j];
}
}
}
thrust::device_vector<float> y_tilde_spacings_device(y_tilde_spacings, y_tilde_spacings + n_interpolation_points);
thrust::device_vector<float> denominator_device(denominator, denominator + n_interpolation_points);
// Create the FFT Handles
cufftHandle plan_kernel_tilde, plan_dft, plan_idft;;
CufftSafeCall(cufftCreate(&plan_kernel_tilde));
CufftSafeCall(cufftCreate(&plan_dft));
CufftSafeCall(cufftCreate(&plan_idft));
size_t work_size, work_size_dft, work_size_idft;
int fft_dimensions[2] = {n_fft_coeffs, n_fft_coeffs};
CufftSafeCall(cufftMakePlan2d(plan_kernel_tilde, fft_dimensions[0], fft_dimensions[1], CUFFT_R2C, &work_size));
CufftSafeCall(cufftMakePlanMany(plan_dft, 2, fft_dimensions,
NULL, 1, n_fft_coeffs * n_fft_coeffs,
NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1),
CUFFT_R2C, n_terms, &work_size_dft));
CufftSafeCall(cufftMakePlanMany(plan_idft, 2, fft_dimensions,
NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1),
NULL, 1, n_fft_coeffs * n_fft_coeffs,
CUFFT_C2R, n_terms, &work_size_idft));
// Dump file
float *host_ys = nullptr;
std::ofstream dump_file;
if (opt.get_dump_points()) {
dump_file.open(opt.get_dump_file());
host_ys = new float[num_points * 2];
dump_file << num_points << " " << 2 << std::endl;
}
#ifndef NO_ZMQ
bool send_zmq = opt.get_use_interactive();
zmq::context_t context(1);
zmq::socket_t publisher(context, ZMQ_REQ);
if (opt.get_use_interactive()) {
// Try to connect to the socket
if (opt.verbosity >= 1)
std::cout << "Initializing Connection...." << std::endl;
publisher.setsockopt(ZMQ_RCVTIMEO, opt.get_viz_timeout());
publisher.setsockopt(ZMQ_SNDTIMEO, opt.get_viz_timeout());
if (opt.verbosity >= 1)
std::cout << "Waiting for connection to visualization for 10 secs...." << std::endl;
publisher.connect(opt.get_viz_server());
// Send the number of points we should be expecting to the server
std::string message = std::to_string(opt.num_points);
send_zmq = publisher.send(message.c_str(), message.length());
// Wait for server reply
zmq::message_t request;
send_zmq = publisher.recv (&request);
// If there's a time-out, don't bother.
if (send_zmq) {
if (opt.verbosity >= 1)
std::cout << "Visualization connected!" << std::endl;
} else {
std::cout << "No Visualization Terminal, continuing..." << std::endl;
send_zmq = false;
}
}
#else
if (opt.get_use_interactive())
std::cout << "This version is not built with ZMQ for interative viz. Rebuild with WITH_ZMQ=TRUE for viz." << std::endl;
#endif
END_IL_TIMER(_time_init_fft);
// Support for infinite iteration
for (size_t step = 0; step != opt.iterations; step++) {
START_IL_TIMER();
float fill_value = 0;
thrust::fill(w_coefficients_device.begin(), w_coefficients_device.end(), fill_value);
thrust::fill(potentialsQij_device.begin(), potentialsQij_device.end(), fill_value);
// Setup learning rate schedule
if (step == opt.force_magnify_iters) {
momentum = opt.post_exaggeration_momentum;
attr_exaggeration = 1.0f;
}
END_IL_TIMER(_time_other);
// Prepare the terms that we'll use to compute the sum i.e. the repulsive forces
START_IL_TIMER();
tsnecuda::ComputeChargesQij(chargesQij_device, points_device, num_points, num_points - 1, n_terms);
END_IL_TIMER(_time_compute_charges);
// Compute Minimax elements
START_IL_TIMER();
auto minimax_iter = thrust::minmax_element(points_device.begin(), points_device.end());
float min_coord = minimax_iter.first[0];
float max_coord = minimax_iter.second[0];
// Compute the number of boxes in a single dimension and the total number of boxes in 2d
// auto n_boxes_per_dim = static_cast<int>(fmax(min_num_intervals, (max_coord - min_coord) / intervals_per_integer));
tsnecuda::PrecomputeFFT2D(
plan_kernel_tilde, max_coord, min_coord, max_coord, min_coord, n_boxes_per_dim, n_interpolation_points,
box_lower_bounds_device, box_upper_bounds_device, kernel_tilde_device,
fft_kernel_tilde_device);
float box_width = ((max_coord - min_coord) / (float) n_boxes_per_dim);
END_IL_TIMER(_time_precompute_2d);
START_IL_TIMER();
tsnecuda::NbodyFFT2D(
plan_dft, plan_idft,
N, n_terms, n_boxes_per_dim, n_interpolation_points,
fft_kernel_tilde_device, n_total_boxes,
total_interpolation_points, min_coord, box_width, n_fft_coeffs_half, n_fft_coeffs, num_points - 1,
fft_input, fft_w_coefficients, fft_output,
point_box_idx_device, x_in_box_device, y_in_box_device, points_device,
box_lower_bounds_device, y_tilde_spacings_device, denominator_device, y_tilde_values,
all_interpolated_values_device, output_values, all_interpolated_indices,
output_indices, w_coefficients_device, chargesQij_device, x_interpolated_values_device,
y_interpolated_values_device, potentialsQij_device);
END_IL_TIMER(_time_nbodyfft);
START_IL_TIMER();
// Make the negative term, or F_rep in the equation 3 of the paper
normalization = tsnecuda::ComputeRepulsiveForces(
repulsive_forces_device, normalization_vec_device, points_device,
potentialsQij_device, num_points, num_points - 1, n_terms);
END_IL_TIMER(_time_norm);
START_IL_TIMER();
// Calculate Attractive Forces
tsnecuda::ComputeAttractiveForces(gpu_opt,
sparse_handle,
sparse_matrix_descriptor,
attractive_forces_device,
sparse_pij_device,
pij_row_ptr_device,
pij_col_ind_device,
coo_indices_device,
points_device,
ones_device,
num_points - 1,
num_points,
num_nonzero);
END_IL_TIMER(_time_attr);
START_IL_TIMER();
// Apply Forces
tsnecuda::ApplyForces(gpu_opt,
points_device,
attractive_forces_device,
repulsive_forces_device,
gains_device,
old_forces_device,
eta,
normalization,
momentum,
attr_exaggeration,
num_points - 1,
num_points,
num_blocks);
// // Compute the gradient norm
tsnecuda::util::SquareDeviceVector(attractive_forces_device, old_forces_device);
thrust::transform(attractive_forces_device.begin(), attractive_forces_device.begin()+num_points,
attractive_forces_device.begin()+num_points, attractive_forces_device.begin(),
thrust::plus<float>());
tsnecuda::util::SqrtDeviceVector(attractive_forces_device, attractive_forces_device);
float grad_norm = thrust::reduce(
attractive_forces_device.begin(), attractive_forces_device.begin() + num_points,
0.0f, thrust::plus<float>()) / num_points;
thrust::fill(attractive_forces_device.begin(), attractive_forces_device.end(), 0.0f);
if (grad_norm < opt.min_gradient_norm) {
if (opt.verbosity >= 1) std::cout << "Reached minimum gradient norm: " << grad_norm << std::endl;
break;
}
if (opt.verbosity >= 1 && step % opt.print_interval == 0) {
std::cout << "[Step " << step << "] Avg. Gradient Norm: " << grad_norm << std::endl;
}
END_IL_TIMER(_time_apply_forces);
#ifndef NO_ZMQ
if (send_zmq) {
zmq::message_t message(sizeof(float)*opt.num_points*2);
thrust::copy(points_device.begin(), points_device.end(), static_cast<float*>(message.data()));
bool res = false;
res = publisher.send(message);
zmq::message_t request;
res = publisher.recv(&request);
if (!res) {
std::cout << "Server Disconnected, Not sending anymore for this session." << std::endl;
}
send_zmq = res;
}
#endif
if (opt.get_dump_points() && step % opt.get_dump_interval() == 0) {
thrust::copy(points_device.begin(), points_device.end(), host_ys);
for (int i = 0; i < opt.num_points; i++) {
dump_file << host_ys[i] << " " << host_ys[i + num_points] << std::endl;
}
}
// // Handle snapshoting
if (opt.return_style == tsnecuda::RETURN_STYLE::SNAPSHOT && step % snap_interval == 0 && opt.return_data != nullptr) {
thrust::copy(points_device.begin(),
points_device.end(),
snap_num*opt.num_points*2 + opt.return_data);
snap_num += 1;
}
}
CufftSafeCall(cufftDestroy(plan_kernel_tilde));
CufftSafeCall(cufftDestroy(plan_dft));
CufftSafeCall(cufftDestroy(plan_idft));
if (opt.verbosity > 0) {
PRINT_IL_TIMER(_time_initialization);
PRINT_IL_TIMER(_time_knn);
PRINT_IL_TIMER(_time_symmetry);
PRINT_IL_TIMER(_time_init_low_dim);
PRINT_IL_TIMER(_time_init_fft);
PRINT_IL_TIMER(_time_compute_charges);
PRINT_IL_TIMER(_time_precompute_2d);
PRINT_IL_TIMER(_time_nbodyfft);
PRINT_IL_TIMER(_time_norm);
PRINT_IL_TIMER(_time_attr);
PRINT_IL_TIMER(_time_apply_forces);
PRINT_IL_TIMER(_time_other);
PRINT_IL_TIMER(total_time);
}
// Clean up the dump file if we are dumping points
if (opt.get_dump_points()){
delete[] host_ys;
dump_file.close();
}
// Handle a once off return type
if (opt.return_style == tsnecuda::RETURN_STYLE::ONCE && opt.return_data != nullptr) {
thrust::copy(points_device.begin(), points_device.end(), opt.return_data);
}
// Handle snapshoting
if (opt.return_style == tsnecuda::RETURN_STYLE::SNAPSHOT && opt.return_data != nullptr) {
thrust::copy(points_device.begin(), points_device.end(), snap_num*opt.num_points*2 + opt.return_data);
}
// Return some final values
opt.trained = true;
opt.trained_norm = normalization;
return;
}
|
a9d8abbfc85aae519949e7221d610d92d3f90cae.hip | // !!! This is a file automatically generated by hipify!!!
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2012, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
When using this code in a scientific project, please cite one or all of the
following papers:
* Daniel Ruijters and Philippe Thvenaz,
GPU Prefilter for Accurate Cubic B-Spline Interpolation,
The Computer Journal, vol. 55, no. 1, pp. 15-20, January 2012.
http://dannyruijters.nl/docs/cudaPrefilter3.pdf
* Daniel Ruijters, Bart M. ter Haar Romeny, and Paul Suetens,
Efficient GPU-Based Texture Interpolation using Uniform B-Splines,
Journal of Graphics Tools, vol. 13, no. 4, pp. 61-69, 2008.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC_BSPLINE_PREFILTER_KERNEL_H_
#define _CUBIC_BSPLINE_PREFILTER_KERNEL_H_
#include "math_func.cu"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "helper_math.h"
// The code below is based on the work of Philippe Thevenaz.
// See <http://bigwww.epfl.ch/thevenaz/interpolation/>
#define Pole (sqrt(3.0f)-2.0f) //pole for cubic b-spline
//--------------------------------------------------------------------------
// Local GPU device procedures
//--------------------------------------------------------------------------
template<class floatN>
__host__ __device__ floatN InitialCausalCoefficient(
floatN* c, // coefficients
uint DataLength, // number of coefficients
int step) // element interleave in bytes
{
const uint Horizon = UMIN(12, DataLength);
// this initialization corresponds to clamping boundaries
// accelerated loop
float zn = Pole;
floatN Sum = *c;
for (uint n = 0; n < Horizon; n++) {
Sum += zn * *c;
zn *= Pole;
c = (floatN*)((uchar*)c + step);
}
return(Sum);
}
template<class floatN>
__host__ __device__ floatN InitialAntiCausalCoefficient(
floatN* c, // last coefficient
uint DataLength, // number of samples or coefficients
int step) // element interleave in bytes
{
// this initialization corresponds to clamping boundaries
return((Pole / (Pole - 1.0f)) * *c);
}
template<class floatN>
__host__ __device__ void ConvertToInterpolationCoefficients(
floatN* coeffs, // input samples --> output coefficients
uint DataLength, // number of samples or coefficients
int step) // element interleave in bytes
{
// compute the overall gain
const float Lambda = (1.0f - Pole) * (1.0f - 1.0f / Pole);
// causal initialization
floatN* c = coeffs;
floatN previous_c; //cache the previously calculated c rather than look it up again (faster!)
*c = previous_c = Lambda * InitialCausalCoefficient(c, DataLength, step);
// causal recursion
for (uint n = 1; n < DataLength; n++) {
c = (floatN*)((uchar*)c + step);
*c = previous_c = Lambda * *c + Pole * previous_c;
}
// anticausal initialization
*c = previous_c = InitialAntiCausalCoefficient(c, DataLength, step);
// anticausal recursion
for (int n = DataLength - 2; 0 <= n; n--) {
c = (floatN*)((uchar*)c - step);
*c = previous_c = Pole * (previous_c - *c);
}
}
#endif // _CUBIC_BSPLINE_PREFILTER_KERNEL_H_
| a9d8abbfc85aae519949e7221d610d92d3f90cae.cu | /*--------------------------------------------------------------------------*\
Copyright (c) 2008-2012, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
When using this code in a scientific project, please cite one or all of the
following papers:
* Daniel Ruijters and Philippe Th¨¦venaz,
GPU Prefilter for Accurate Cubic B-Spline Interpolation,
The Computer Journal, vol. 55, no. 1, pp. 15-20, January 2012.
http://dannyruijters.nl/docs/cudaPrefilter3.pdf
* Daniel Ruijters, Bart M. ter Haar Romeny, and Paul Suetens,
Efficient GPU-Based Texture Interpolation using Uniform B-Splines,
Journal of Graphics Tools, vol. 13, no. 4, pp. 61-69, 2008.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC_BSPLINE_PREFILTER_KERNEL_H_
#define _CUBIC_BSPLINE_PREFILTER_KERNEL_H_
#include "math_func.cu"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "helper_math.h"
// The code below is based on the work of Philippe Thevenaz.
// See <http://bigwww.epfl.ch/thevenaz/interpolation/>
#define Pole (sqrt(3.0f)-2.0f) //pole for cubic b-spline
//--------------------------------------------------------------------------
// Local GPU device procedures
//--------------------------------------------------------------------------
template<class floatN>
__host__ __device__ floatN InitialCausalCoefficient(
floatN* c, // coefficients
uint DataLength, // number of coefficients
int step) // element interleave in bytes
{
const uint Horizon = UMIN(12, DataLength);
// this initialization corresponds to clamping boundaries
// accelerated loop
float zn = Pole;
floatN Sum = *c;
for (uint n = 0; n < Horizon; n++) {
Sum += zn * *c;
zn *= Pole;
c = (floatN*)((uchar*)c + step);
}
return(Sum);
}
template<class floatN>
__host__ __device__ floatN InitialAntiCausalCoefficient(
floatN* c, // last coefficient
uint DataLength, // number of samples or coefficients
int step) // element interleave in bytes
{
// this initialization corresponds to clamping boundaries
return((Pole / (Pole - 1.0f)) * *c);
}
// In-place conversion of samples into cubic B-spline interpolation
// coefficients via one causal and one anti-causal recursive (IIR) pass
// (Thevenaz's algorithm). 'step' is the byte distance between consecutive
// coefficients, so the same routine works along rows or columns.
template<class floatN>
__host__ __device__ void ConvertToInterpolationCoefficients(
floatN* coeffs, // input samples --> output coefficients
uint DataLength, // number of samples or coefficients
int step) // element interleave in bytes
{
// compute the overall gain
const float Lambda = (1.0f - Pole) * (1.0f - 1.0f / Pole);
// causal initialization
floatN* c = coeffs;
floatN previous_c; //cache the previously calculated c rather than look it up again (faster!)
*c = previous_c = Lambda * InitialCausalCoefficient(c, DataLength, step);
// causal recursion (left-to-right sweep)
for (uint n = 1; n < DataLength; n++) {
c = (floatN*)((uchar*)c + step);
*c = previous_c = Lambda * *c + Pole * previous_c;
}
// anticausal initialization (c now points at the last coefficient)
*c = previous_c = InitialAntiCausalCoefficient(c, DataLength, step);
// anticausal recursion (right-to-left sweep)
for (int n = DataLength - 2; 0 <= n; n--) {
c = (floatN*)((uchar*)c - step);
*c = previous_c = Pole * (previous_c - *c);
}
}
#endif // _CUBIC_BSPLINE_PREFILTER_KERNEL_H_
|
203c217773b5c56a8f41f1e7f1991749748acf01.hip | // !!! This is a file automatically generated by hipify!!!
#include "Macro.h"
#include "CUFLU.h"
#if ( MODEL == HYDRO && defined GPU )
#ifdef UNSPLIT_GRAVITY
#include "CUPOT.h"
__constant__ double c_ExtAcc_AuxArray[EXT_ACC_NAUX_MAX];
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_SetConstMem_FluidSolver_ExtAcc
// Description : Set the constant memory of c_ExtAcc_AuxArray[] used by CUFLU_FluidSolver_CTU/MHM()
//
// Note : 1. Adopt the suggested approach for CUDA version >= 5.0
// 2. Invoked by CUAPI_Init_ExternalAccPot()
//
// Parameter : None
//
// Return : 0/-1 : successful/failed
//---------------------------------------------------------------------------------------------------
__host__
int CUFLU_SetConstMem_FluidSolver_ExtAcc( double h_ExtAcc_AuxArray[] )
{
   // copy the host auxiliary array into constant memory; report 0/-1 for success/failure
   const hipError_t Err = hipMemcpyToSymbol( c_ExtAcc_AuxArray, h_ExtAcc_AuxArray,
                                             EXT_ACC_NAUX_MAX*sizeof(double),
                                             0, hipMemcpyHostToDevice );

   return ( Err == hipSuccess ) ? 0 : -1;
} // FUNCTION : CUFLU_SetConstMem_FluidSolver_ExtAcc
#endif // #ifdef UNSPLIT_GRAVITY
#if ( NCOMP_PASSIVE > 0 )
__constant__ int c_NormIdx[NCOMP_PASSIVE];
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_SetConstMem_FluidSolver_NormIdx
// Description : Set the constant memory of c_NormIdx[] used by CUFLU_FluidSolver_CTU/MHM()
//
// Note : 1. Adopt the suggested approach for CUDA version >= 5.0
// 2. Invoked by CUAPI_Set_Default_GPU_Parameter()
//
// Parameter : None
//
// Return : 0/-1 : successful/failed
//---------------------------------------------------------------------------------------------------
__host__
int CUFLU_SetConstMem_FluidSolver_NormIdx( int h_NormIdx[] )
{
   // copy the passive-scalar index table into constant memory; 0/-1 for success/failure
   const hipError_t Err = hipMemcpyToSymbol( c_NormIdx, h_NormIdx,
                                             NCOMP_PASSIVE*sizeof(int),
                                             0, hipMemcpyHostToDevice );

   return ( Err == hipSuccess ) ? 0 : -1;
} // FUNCTION : CUFLU_SetConstMem_FluidSolver_NormIdx
#else // #if ( NCOMP_PASSIVE > 0 )
__constant__ int *c_NormIdx = NULL;
#endif // #if ( NCOMP_PASSIVE > 0 ) ... else ...
#endif // #if ( MODEL == HYDRO && defined GPU )
| 203c217773b5c56a8f41f1e7f1991749748acf01.cu | #include "Macro.h"
#include "CUFLU.h"
#if ( MODEL == HYDRO && defined GPU )
#ifdef UNSPLIT_GRAVITY
#include "CUPOT.h"
__constant__ double c_ExtAcc_AuxArray[EXT_ACC_NAUX_MAX];
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_SetConstMem_FluidSolver_ExtAcc
// Description : Set the constant memory of c_ExtAcc_AuxArray[] used by CUFLU_FluidSolver_CTU/MHM()
//
// Note : 1. Adopt the suggested approach for CUDA version >= 5.0
// 2. Invoked by CUAPI_Init_ExternalAccPot()
//
// Parameter : None
//
// Return : 0/-1 : successful/failed
//---------------------------------------------------------------------------------------------------
__host__
int CUFLU_SetConstMem_FluidSolver_ExtAcc( double h_ExtAcc_AuxArray[] )
{
   // copy the host auxiliary array into constant memory; report 0/-1 for success/failure
   const cudaError_t Err = cudaMemcpyToSymbol( c_ExtAcc_AuxArray, h_ExtAcc_AuxArray,
                                               EXT_ACC_NAUX_MAX*sizeof(double),
                                               0, cudaMemcpyHostToDevice );

   return ( Err == cudaSuccess ) ? 0 : -1;
} // FUNCTION : CUFLU_SetConstMem_FluidSolver_ExtAcc
#endif // #ifdef UNSPLIT_GRAVITY
#if ( NCOMP_PASSIVE > 0 )
__constant__ int c_NormIdx[NCOMP_PASSIVE];
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_SetConstMem_FluidSolver_NormIdx
// Description : Set the constant memory of c_NormIdx[] used by CUFLU_FluidSolver_CTU/MHM()
//
// Note : 1. Adopt the suggested approach for CUDA version >= 5.0
// 2. Invoked by CUAPI_Set_Default_GPU_Parameter()
//
// Parameter : None
//
// Return : 0/-1 : successful/failed
//---------------------------------------------------------------------------------------------------
__host__
int CUFLU_SetConstMem_FluidSolver_NormIdx( int h_NormIdx[] )
{
   // copy the passive-scalar index table into constant memory; 0/-1 for success/failure
   const cudaError_t Err = cudaMemcpyToSymbol( c_NormIdx, h_NormIdx,
                                               NCOMP_PASSIVE*sizeof(int),
                                               0, cudaMemcpyHostToDevice );

   return ( Err == cudaSuccess ) ? 0 : -1;
} // FUNCTION : CUFLU_SetConstMem_FluidSolver_NormIdx
#else // #if ( NCOMP_PASSIVE > 0 )
__constant__ int *c_NormIdx = NULL;
#endif // #if ( NCOMP_PASSIVE > 0 ) ... else ...
#endif // #if ( MODEL == HYDRO && defined GPU )
|
dba8eedad00f881a62037b07009d3c1b63641629.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Name : MMShared.cu
Author : Liam Lefferts
Version : 1.0
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
// System includes
#include <stdio.h>
#include<stdlib.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#define TILE_WIDTH 16
// Row-major matrix view. 'stride' is the row pitch in elements, which lets
// sub-matrix views (as used by matrixMultiplyShared) alias a parent matrix
// without copying.
typedef struct
{
int numRows;
int numCols;
int stride; // row pitch in elements (== numCols for a full matrix)
float * elements; // pointer to the data (device pointers in this file's usage)
} Matrix;
// Fill matrix elements
// Fill data[0..size-1] with the deterministic test pattern 0.5*(i+1).
void initialize(float *data, unsigned size)
{
    for (unsigned idx = 0; idx < size; idx++)
        data[idx] = .5 * (idx + 1);
}
//
// Matrix Multiplication CPU for error checking
//
//
// CPU reference matrix multiply for error checking: fc = fa x fb.
// Both inputs are indexed with Width as their row stride, so this assumes
// the shared dimension equals Width (square-style layout).
//
void matrixmult(float *fa, float *fb, float *fc,int Hight, int Width){
    for (int row = 0; row < Hight; ++row) {
        for (int col = 0; col < Width; ++col) {
            float acc = 0;
            for (int k = 0; k < Width; ++k)
                acc += fa[row * Width + k] * fb[k * Width + col];
            fc[row * Width + col] = acc;
        }
    }
}
//Compute C=A*B in GPU non shared memory
// Compute C = A*B using global memory only (no tiling).
// One thread per output element; the grid must cover numCRows x numCColumns.
// Bug fix: the dot product must run over the shared dimension
// (numAColumns == numBRows); the original iterated over numBColumns,
// which is only correct for square matrices.
__global__ void matrixMultiply(float * A, float * B, float * C,
                               int numARows, int numAColumns,
                               int numBRows, int numBColumns,
                               int numCRows, int numCColumns) {
    // identify the row and column of C this thread computes
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < numARows && col < numBColumns) {
        float Pvalue = 0;
        for (int k = 0; k < numAColumns; k++) {
            Pvalue += A[row * numAColumns + k] * B[k * numBColumns + col];
        }
        C[row * numCColumns + col] = Pvalue;
    }
}
/* matrixMultiplyShared -
* Compute C = A*B in GPU shared memory
*
* Takes:
* Matrix d_A, d_B; matrices to compute
* Matrix d_C; result matrix
*
* Returns: void
*/
__global__ void
matrixMultiplyShared(Matrix d_A, Matrix d_B, Matrix d_C)
{
    // Each thread block computes one TILE_WIDTH x TILE_WIDTH sub-matrix of d_C.
    // Preconditions: blockDim == (TILE_WIDTH, TILE_WIDTH) and all matrix
    // dimensions are multiples of TILE_WIDTH (no bounds checks below).
    Matrix sub_C;
    sub_C.numCols = sub_C.numRows = TILE_WIDTH;
    sub_C.stride = d_C.stride;
    sub_C.elements = &d_C.elements[d_C.stride
                                   * TILE_WIDTH * blockIdx.y
                                   + TILE_WIDTH * blockIdx.x];
    // Each thread computes one element of sub_C by accumulating results into Pvalue
    float Pvalue = 0.0;
    // Loop over all pairs of tiles of d_A and d_B needed for sub_C.
    // Bug fix: the original addressed both tiles with blockIdx only (never
    // advancing with m) and loaded through undefined names sum_A/sum_B and
    // A.stride/B.stride; tile A must move along block-row blockIdx.y with
    // column m, and tile B along column blockIdx.x with row m.
    for (int m = 0; m < (d_A.numCols / TILE_WIDTH); ++m) {
        // Sub-matrix of A: block row blockIdx.y, tile column m
        Matrix sub_A;
        sub_A.numCols = sub_A.numRows = TILE_WIDTH;
        sub_A.stride = d_A.stride;
        sub_A.elements = &d_A.elements[d_A.stride
                                       * TILE_WIDTH * blockIdx.y
                                       + TILE_WIDTH * m];
        // Sub-matrix of B: tile row m, block column blockIdx.x
        Matrix sub_B;
        sub_B.numCols = sub_B.numRows = TILE_WIDTH;
        sub_B.stride = d_B.stride;
        sub_B.elements = &d_B.elements[d_B.stride
                                       * TILE_WIDTH * m
                                       + TILE_WIDTH * blockIdx.x];
        // Shared memory used to stage the two tiles
        __shared__ float sharedA[TILE_WIDTH][TILE_WIDTH];
        __shared__ float sharedB[TILE_WIDTH][TILE_WIDTH];
        // Each thread loads one element of each tile from global memory
        sharedA[threadIdx.y][threadIdx.x] =
            sub_A.elements[threadIdx.y * sub_A.stride + threadIdx.x];
        sharedB[threadIdx.y][threadIdx.x] =
            sub_B.elements[threadIdx.y * sub_B.stride + threadIdx.x];
        // Synchronize to ensure both tiles are fully loaded
        __syncthreads();
        // Partial dot product over this tile pair
        for (int i = 0; i < TILE_WIDTH; ++i)
            Pvalue += sharedA[threadIdx.y][i] * sharedB[i][threadIdx.x];
        // Synchronize so the tiles are not overwritten while still in use
        __syncthreads();
    }
    // Each thread writes its sub_C element to global device memory
    sub_C.elements[threadIdx.y * sub_C.stride + threadIdx.x] = Pvalue;
}
/* printMatricesCheck
* Prints matrices check status to stdout
*
* Takes:
* float * A, B; matrices to compare
* int numCRows, numCColumns; matrix dimensions
*
* Returns: void
*/
/* printMatricesCheck
 * Prints matrices check status to stdout: "FAILED" once on any mismatch,
 * otherwise a success banner followed by the matrix, one row per line.
 *
 * Takes:
 *   float * A, B;                 matrices to compare
 *   int numCRows, numCColumns;    matrix dimensions
 *
 * Returns: void
 *
 * Bug fixes vs. the original: int abs() truncated fractional differences
 * (so small mismatches passed); "FAILED" printed once per element; the
 * newline condition was inverted (printed mid-row instead of at row starts).
 */
void
printMatricesCheck(float * A, float * B, int numCRows, int numCColumns)
{
    float accum = 0;
    for (int i = 0; i < numCRows * numCColumns; ++i)
    {
        float diff = A[i] - B[i];
        accum += (diff < 0 ? -diff : diff);   // float abs, no int truncation
    }
    if (accum != 0)
    {
        printf("FAILED\n");
        return;
    }
    printf("Matrices match...\n");
    printf("\nSUCCESSFUL!\n");
    // print MM result, one matrix row per output line
    for (int i = 0; i < numCRows * numCColumns; ++i)
    {
        if (i % numCColumns == 0)
            printf("\n");
        printf("%lf ", A[i]);
    }
}
/*
 * Driver: parses matrix dimensions from argv, computes the CPU reference
 * product, then runs the GPU multiply without and with shared-memory tiling
 * and checks both results against the reference.
 *
 * Fixes vs. the original: matrixMultiply was launched with 3 of its 9
 * arguments; 'threads'/'grid' were declared twice (compile error); the
 * Matrix structs were passed to hipFree (they alias deviceA/B/C, and d_C
 * did not exist); new[] buffers were released with free(); hostE leaked;
 * the grid used truncating division.
 */
int main(int argc, char ** argv)
{
    int numARows, numAColumns,
        numBRows, numBColumns,
        numCRows, numCColumns;
    if (argc != 4)
    {
        printf("Usage: %s <matrix A row count>, <matrix A column count>, <matrix B column count>", argv[0]);
        exit(1);
    }
    // number of Matrix A rows must equal Matrix C rows
    numCRows = numARows = atoi(argv[1]);
    // number of Matrix A columns must equal Matrix B rows
    numAColumns = numBRows = atoi(argv[2]);
    // number of Matrix B columns must equal Matric C columns
    numCColumns = numBColumns = atoi(argv[3]);
    printf("A[%d x %d] * B[%d x %d] = C[%d x %d]",
           numARows, numAColumns,
           numBRows, numBColumns,
           numCRows, numCColumns);
    // Dimensions requirement check for MM
    if (numAColumns != numBRows)
    {
        printf("numAColumns != numBRows, This matrix cannot be multiplied");
        exit(1);
    }
    // alloc host memory (new[], so released with delete[] below)
    float *hostA = new float[numARows*numAColumns]; // input matrix A
    float *hostB = new float[numBRows*numBColumns]; // input matrix B
    float *hostC = new float[numCRows*numCColumns]; // CPU reference result
    float *hostD = new float[numCRows*numCColumns]; // GPU shared-memory result
    float *hostE = new float[numCRows*numCColumns]; // GPU non-shared result
    initialize(hostA, numARows*numAColumns);
    initialize(hostB, numBRows*numBColumns);
    // do MM on CPU for checking (NOTE: matrixmult assumes square-style layout)
    matrixmult(hostA, hostB, hostC, numCRows, numCColumns);
    // device buffers
    float * deviceA;
    float * deviceB;
    float * deviceC;
    // Determine matrix memory sizes
    unsigned int mem_size_A = sizeof(float) * numARows * numAColumns;
    unsigned int mem_size_B = sizeof(float) * numBRows * numBColumns;
    unsigned int mem_size_C = sizeof(float) * numCRows * numCColumns;
    // Holds error value
    hipError_t err;
    // Allocate GPU memory
    err = hipMalloc((void**) &deviceA, mem_size_A);
    printf("CUDA malloc A: %s\n", hipGetErrorString(err));
    err = hipMalloc((void**) &deviceB, mem_size_B);
    printf("CUDA malloc B: %s\n", hipGetErrorString(err));
    err = hipMalloc((void**) &deviceC, mem_size_C);
    printf("CUDA malloc C: %s\n", hipGetErrorString(err));
    // Copy memory to the GPU
    err = hipMemcpy(deviceA, hostA, mem_size_A, hipMemcpyHostToDevice);
    printf("Copy A off host: %s\n", hipGetErrorString(err));
    err = hipMemcpy(deviceB, hostB, mem_size_B, hipMemcpyHostToDevice);
    printf("Copy B off host: %s\n", hipGetErrorString(err));
    // Initialize grid and block dimensions; ceil-div so partial tiles are
    // covered (the shared-memory kernel still assumes exact multiples of
    // TILE_WIDTH -- its loads are unguarded)
    dim3 threads(TILE_WIDTH, TILE_WIDTH);
    dim3 grid((numBColumns + threads.x - 1) / threads.x,
              (numARows + threads.y - 1) / threads.y);
    // MM without shared memory
    hipLaunchKernelGGL(( matrixMultiply), dim3(grid), dim3(threads), 0, 0,
                       deviceA, deviceB, deviceC,
                       numARows, numAColumns,
                       numBRows, numBColumns,
                       numCRows, numCColumns);
    // Wait for all previously issued device commands before continuing
    err = hipDeviceSynchronize();
    printf("Run nsMM kernel: %s\n", hipGetErrorString(err));
    // Copy the GPU memory back to the CPU here
    err = hipMemcpy(hostE, deviceC, mem_size_C, hipMemcpyDeviceToHost);
    printf("Copy C off of device: %s\n", hipGetErrorString(err));
    printMatricesCheck(hostC, hostE, numCRows, numCColumns);
    // Initialize matrix structures (views over the existing device buffers)
    Matrix d_A;
    d_A.numCols = d_A.stride = numAColumns;
    d_A.numRows = numARows;
    d_A.elements = deviceA;
    Matrix d_B;
    d_B.numCols = d_B.stride = numBColumns;
    d_B.numRows = numBRows;
    d_B.elements = deviceB;
    Matrix d_D;
    d_D.numCols = d_D.stride = numCColumns;
    d_D.numRows = numCRows;
    d_D.elements = deviceC;
    // Invoke shared-memory kernel (reuses the grid/block config above)
    hipLaunchKernelGGL(( matrixMultiplyShared), dim3(grid), dim3(threads), 0, 0, d_A, d_B, d_D);
    // Wait for all previously issued device commands before continuing
    err = hipDeviceSynchronize();
    printf("Run sMM kernel: %s\n", hipGetErrorString(err));
    // Copy the GPU memory back to CPU
    err = hipMemcpy(hostD, d_D.elements, mem_size_C, hipMemcpyDeviceToHost);
    printf("Copy D off of device: %s\n", hipGetErrorString(err));
    printMatricesCheck(hostC, hostD, numCRows, numCColumns);
    // Free GPU memory (the Matrix structs alias deviceA/B/C; free once only)
    hipFree(deviceA);
    hipFree(deviceB);
    hipFree(deviceC);
    // Free CPU memory (allocated with new[])
    delete [] hostA;
    delete [] hostB;
    delete [] hostC;
    delete [] hostD;
    delete [] hostE;
    return 0;
}
| dba8eedad00f881a62037b07009d3c1b63641629.cu | /*
============================================================================
Name : MMShared.cu
Author : Liam Lefferts
Version : 1.0
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
// System includes
#include <stdio.h>
#include<stdlib.h>
#include <assert.h>
// CUDA runtime
#include <cuda.h>
#define TILE_WIDTH 16
// Row-major matrix view. 'stride' is the row pitch in elements, which lets
// sub-matrix views (as used by matrixMultiplyShared) alias a parent matrix
// without copying.
typedef struct
{
int numRows;
int numCols;
int stride; // row pitch in elements (== numCols for a full matrix)
float * elements; // pointer to the data (device pointers in this file's usage)
} Matrix;
// Fill matrix elements
// Fill data[0..size-1] with the deterministic test pattern 0.5*(i+1).
void initialize(float *data, unsigned size)
{
    for (unsigned idx = 0; idx < size; idx++)
        data[idx] = .5 * (idx + 1);
}
//
// Matrix Multiplication CPU for error checking
//
//
// CPU reference matrix multiply for error checking: fc = fa x fb.
// Both inputs are indexed with Width as their row stride, so this assumes
// the shared dimension equals Width (square-style layout).
//
void matrixmult(float *fa, float *fb, float *fc,int Hight, int Width){
    for (int row = 0; row < Hight; ++row) {
        for (int col = 0; col < Width; ++col) {
            float acc = 0;
            for (int k = 0; k < Width; ++k)
                acc += fa[row * Width + k] * fb[k * Width + col];
            fc[row * Width + col] = acc;
        }
    }
}
//Compute C=A*B in GPU non shared memory
// Compute C = A*B using global memory only (no tiling).
// One thread per output element; the grid must cover numCRows x numCColumns.
// Bug fix: the dot product must run over the shared dimension
// (numAColumns == numBRows); the original iterated over numBColumns,
// which is only correct for square matrices.
__global__ void matrixMultiply(float * A, float * B, float * C,
                               int numARows, int numAColumns,
                               int numBRows, int numBColumns,
                               int numCRows, int numCColumns) {
    // identify the row and column of C this thread computes
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < numARows && col < numBColumns) {
        float Pvalue = 0;
        for (int k = 0; k < numAColumns; k++) {
            Pvalue += A[row * numAColumns + k] * B[k * numBColumns + col];
        }
        C[row * numCColumns + col] = Pvalue;
    }
}
/* matrixMultiplyShared -
* Compute C = A*B in GPU shared memory
*
* Takes:
* Matrix d_A, d_B; matrices to compute
* Matrix d_C; result matrix
*
* Returns: void
*/
__global__ void
matrixMultiplyShared(Matrix d_A, Matrix d_B, Matrix d_C)
{
    // Each thread block computes one TILE_WIDTH x TILE_WIDTH sub-matrix of d_C.
    // Preconditions: blockDim == (TILE_WIDTH, TILE_WIDTH) and all matrix
    // dimensions are multiples of TILE_WIDTH (no bounds checks below).
    Matrix sub_C;
    sub_C.numCols = sub_C.numRows = TILE_WIDTH;
    sub_C.stride = d_C.stride;
    sub_C.elements = &d_C.elements[d_C.stride
                                   * TILE_WIDTH * blockIdx.y
                                   + TILE_WIDTH * blockIdx.x];
    // Each thread computes one element of sub_C by accumulating results into Pvalue
    float Pvalue = 0.0;
    // Loop over all pairs of tiles of d_A and d_B needed for sub_C.
    // Bug fix: the original addressed both tiles with blockIdx only (never
    // advancing with m) and loaded through undefined names sum_A/sum_B and
    // A.stride/B.stride; tile A must move along block-row blockIdx.y with
    // column m, and tile B along column blockIdx.x with row m.
    for (int m = 0; m < (d_A.numCols / TILE_WIDTH); ++m) {
        // Sub-matrix of A: block row blockIdx.y, tile column m
        Matrix sub_A;
        sub_A.numCols = sub_A.numRows = TILE_WIDTH;
        sub_A.stride = d_A.stride;
        sub_A.elements = &d_A.elements[d_A.stride
                                       * TILE_WIDTH * blockIdx.y
                                       + TILE_WIDTH * m];
        // Sub-matrix of B: tile row m, block column blockIdx.x
        Matrix sub_B;
        sub_B.numCols = sub_B.numRows = TILE_WIDTH;
        sub_B.stride = d_B.stride;
        sub_B.elements = &d_B.elements[d_B.stride
                                       * TILE_WIDTH * m
                                       + TILE_WIDTH * blockIdx.x];
        // Shared memory used to stage the two tiles
        __shared__ float sharedA[TILE_WIDTH][TILE_WIDTH];
        __shared__ float sharedB[TILE_WIDTH][TILE_WIDTH];
        // Each thread loads one element of each tile from global memory
        sharedA[threadIdx.y][threadIdx.x] =
            sub_A.elements[threadIdx.y * sub_A.stride + threadIdx.x];
        sharedB[threadIdx.y][threadIdx.x] =
            sub_B.elements[threadIdx.y * sub_B.stride + threadIdx.x];
        // Synchronize to ensure both tiles are fully loaded
        __syncthreads();
        // Partial dot product over this tile pair
        for (int i = 0; i < TILE_WIDTH; ++i)
            Pvalue += sharedA[threadIdx.y][i] * sharedB[i][threadIdx.x];
        // Synchronize so the tiles are not overwritten while still in use
        __syncthreads();
    }
    // Each thread writes its sub_C element to global device memory
    sub_C.elements[threadIdx.y * sub_C.stride + threadIdx.x] = Pvalue;
}
/* printMatricesCheck
* Prints matrices check status to stdout
*
* Takes:
* float * A, B; matrices to compare
* int numCRows, numCColumns; matrix dimensions
*
* Returns: void
*/
/* printMatricesCheck
 * Prints matrices check status to stdout: "FAILED" once on any mismatch,
 * otherwise a success banner followed by the matrix, one row per line.
 *
 * Takes:
 *   float * A, B;                 matrices to compare
 *   int numCRows, numCColumns;    matrix dimensions
 *
 * Returns: void
 *
 * Bug fixes vs. the original: int abs() truncated fractional differences
 * (so small mismatches passed); "FAILED" printed once per element; the
 * newline condition was inverted (printed mid-row instead of at row starts).
 */
void
printMatricesCheck(float * A, float * B, int numCRows, int numCColumns)
{
    float accum = 0;
    for (int i = 0; i < numCRows * numCColumns; ++i)
    {
        float diff = A[i] - B[i];
        accum += (diff < 0 ? -diff : diff);   // float abs, no int truncation
    }
    if (accum != 0)
    {
        printf("FAILED\n");
        return;
    }
    printf("Matrices match...\n");
    printf("\nSUCCESSFUL!\n");
    // print MM result, one matrix row per output line
    for (int i = 0; i < numCRows * numCColumns; ++i)
    {
        if (i % numCColumns == 0)
            printf("\n");
        printf("%lf ", A[i]);
    }
}
/*
 * Driver: parses matrix dimensions from argv, computes the CPU reference
 * product, then runs the GPU multiply without and with shared-memory tiling
 * and checks both results against the reference.
 *
 * Fixes vs. the original: matrixMultiply was launched with 3 of its 9
 * arguments; 'threads'/'grid' were declared twice (compile error); the
 * Matrix structs were passed to cudaFree (they alias deviceA/B/C, and d_C
 * did not exist); new[] buffers were released with free(); hostE leaked;
 * the grid used truncating division.
 */
int main(int argc, char ** argv)
{
    int numARows, numAColumns,
        numBRows, numBColumns,
        numCRows, numCColumns;
    if (argc != 4)
    {
        printf("Usage: %s <matrix A row count>, <matrix A column count>, <matrix B column count>", argv[0]);
        exit(1);
    }
    // number of Matrix A rows must equal Matrix C rows
    numCRows = numARows = atoi(argv[1]);
    // number of Matrix A columns must equal Matrix B rows
    numAColumns = numBRows = atoi(argv[2]);
    // number of Matrix B columns must equal Matric C columns
    numCColumns = numBColumns = atoi(argv[3]);
    printf("A[%d x %d] * B[%d x %d] = C[%d x %d]",
           numARows, numAColumns,
           numBRows, numBColumns,
           numCRows, numCColumns);
    // Dimensions requirement check for MM
    if (numAColumns != numBRows)
    {
        printf("numAColumns != numBRows, This matrix cannot be multiplied");
        exit(1);
    }
    // alloc host memory (new[], so released with delete[] below)
    float *hostA = new float[numARows*numAColumns]; // input matrix A
    float *hostB = new float[numBRows*numBColumns]; // input matrix B
    float *hostC = new float[numCRows*numCColumns]; // CPU reference result
    float *hostD = new float[numCRows*numCColumns]; // GPU shared-memory result
    float *hostE = new float[numCRows*numCColumns]; // GPU non-shared result
    initialize(hostA, numARows*numAColumns);
    initialize(hostB, numBRows*numBColumns);
    // do MM on CPU for checking (NOTE: matrixmult assumes square-style layout)
    matrixmult(hostA, hostB, hostC, numCRows, numCColumns);
    // device buffers
    float * deviceA;
    float * deviceB;
    float * deviceC;
    // Determine matrix memory sizes
    unsigned int mem_size_A = sizeof(float) * numARows * numAColumns;
    unsigned int mem_size_B = sizeof(float) * numBRows * numBColumns;
    unsigned int mem_size_C = sizeof(float) * numCRows * numCColumns;
    // Holds error value
    cudaError_t err;
    // Allocate GPU memory
    err = cudaMalloc((void**) &deviceA, mem_size_A);
    printf("CUDA malloc A: %s\n", cudaGetErrorString(err));
    err = cudaMalloc((void**) &deviceB, mem_size_B);
    printf("CUDA malloc B: %s\n", cudaGetErrorString(err));
    err = cudaMalloc((void**) &deviceC, mem_size_C);
    printf("CUDA malloc C: %s\n", cudaGetErrorString(err));
    // Copy memory to the GPU
    err = cudaMemcpy(deviceA, hostA, mem_size_A, cudaMemcpyHostToDevice);
    printf("Copy A off host: %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(deviceB, hostB, mem_size_B, cudaMemcpyHostToDevice);
    printf("Copy B off host: %s\n", cudaGetErrorString(err));
    // Initialize grid and block dimensions; ceil-div so partial tiles are
    // covered (the shared-memory kernel still assumes exact multiples of
    // TILE_WIDTH -- its loads are unguarded)
    dim3 threads(TILE_WIDTH, TILE_WIDTH);
    dim3 grid((numBColumns + threads.x - 1) / threads.x,
              (numARows + threads.y - 1) / threads.y);
    // MM without shared memory
    matrixMultiply<<< grid, threads>>>(deviceA, deviceB, deviceC,
                                       numARows, numAColumns,
                                       numBRows, numBColumns,
                                       numCRows, numCColumns);
    // Wait for all previously issued device commands before continuing
    err = cudaDeviceSynchronize();
    printf("Run nsMM kernel: %s\n", cudaGetErrorString(err));
    // Copy the GPU memory back to the CPU here
    err = cudaMemcpy(hostE, deviceC, mem_size_C, cudaMemcpyDeviceToHost);
    printf("Copy C off of device: %s\n", cudaGetErrorString(err));
    printMatricesCheck(hostC, hostE, numCRows, numCColumns);
    // Initialize matrix structures (views over the existing device buffers)
    Matrix d_A;
    d_A.numCols = d_A.stride = numAColumns;
    d_A.numRows = numARows;
    d_A.elements = deviceA;
    Matrix d_B;
    d_B.numCols = d_B.stride = numBColumns;
    d_B.numRows = numBRows;
    d_B.elements = deviceB;
    Matrix d_D;
    d_D.numCols = d_D.stride = numCColumns;
    d_D.numRows = numCRows;
    d_D.elements = deviceC;
    // Invoke shared-memory kernel (reuses the grid/block config above)
    matrixMultiplyShared<<< grid, threads>>>(d_A, d_B, d_D);
    // Wait for all previously issued device commands before continuing
    err = cudaDeviceSynchronize();
    printf("Run sMM kernel: %s\n", cudaGetErrorString(err));
    // Copy the GPU memory back to CPU
    err = cudaMemcpy(hostD, d_D.elements, mem_size_C, cudaMemcpyDeviceToHost);
    printf("Copy D off of device: %s\n", cudaGetErrorString(err));
    printMatricesCheck(hostC, hostD, numCRows, numCColumns);
    // Free GPU memory (the Matrix structs alias deviceA/B/C; free once only)
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    // Free CPU memory (allocated with new[])
    delete [] hostA;
    delete [] hostB;
    delete [] hostC;
    delete [] hostD;
    delete [] hostE;
    return 0;
}
|
7ce552bc5496e852a7602cf6d5654e6f16e998e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/contrastive_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward pass of the contrastive loss.
// Computes d_i^2 = ||a_i - b_i||^2 per pair on the GPU, then reduces the
// per-pair loss terms over the batch on the CPU:
//   similar pair (label != 0):  d_i^2
//   dissimilar pair:            max(margin - sqrt(d_i^2), 0)^2
//                               (legacy: max(margin - d_i^2, 0))
// and scales by 1/(2*N).
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(
count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
// row-sum of the squared differences via GEMV with a vector of ones,
// yielding one squared distance per pair
caffe_gpu_gemv(
CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
bool legacy_version =
this->layer_param_.contrastive_loss_param().legacy_version();
Dtype loss(0.0);
// batch reduction on the CPU; bottom[2] holds the binary pair labels
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
//LOG(INFO) << "Similar pairs have label: " << static_cast<int>(bottom[2]->cpu_data()[i]);
loss += dist_sq_.cpu_data()[i];
} else { // dissimilar pairs
if (legacy_version) {
loss += ::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
} else {
Dtype dist = ::max(margin - sqrt(dist_sq_.cpu_data()[i]),
Dtype(0.0));
loss += dist*dist;
}
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
// Backward kernel for the contrastive loss: one thread per element of
// bottom_diff (grid-stride via CUDA_KERNEL_LOOP).
//   similar pair (y[n] != 0):  gradient = alpha * (a-b)
//   dissimilar pair:           hinge gradient while inside the margin,
//                              zero otherwise
// 'alpha' already folds in the top loss weight, the 1/N normalization,
// and the +/- sign distinguishing the a- and b-bottoms (see Backward_gpu).
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
const Dtype margin, const bool legacy_version, const Dtype alpha,
const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version) {
mdist = (margin - dist_sq[n]);
beta = -alpha;
} else {
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
// 1e-4 guards against division by zero when the pair coincides
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0) {
bottom_diff[i] = beta;
} else {
// outside the margin the hinge is flat: no gradient
bottom_diff[i] = 0;
}
}
}
}
// Backward pass: propagates gradients to the two feature bottoms.
// i == 0 is the 'a' input (sign +1), i == 1 the 'b' input (sign -1);
// bottom[2] (the pair labels) receives no gradient.
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const bool legacy_version =
this->layer_param_.contrastive_loss_param().legacy_version();
const Dtype sign = (i == 0) ? 1 : -1;
// fold the top loss weight, 1/N normalization, and bottom sign into
// a single scalar consumed by the kernel
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(bottom[0]->num());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, margin, legacy_version, alpha,
bottom[2]->gpu_data(), // pair similarity 0 or 1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace caffe
| 7ce552bc5496e852a7602cf6d5654e6f16e998e0.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/contrastive_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward pass of the contrastive loss.
// Computes d_i^2 = ||a_i - b_i||^2 per pair on the GPU, then reduces the
// per-pair loss terms over the batch on the CPU:
//   similar pair (label != 0):  d_i^2
//   dissimilar pair:            max(margin - sqrt(d_i^2), 0)^2
//                               (legacy: max(margin - d_i^2, 0))
// and scales by 1/(2*N).
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(
count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
// row-sum of the squared differences via GEMV with a vector of ones,
// yielding one squared distance per pair
caffe_gpu_gemv(
CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
bool legacy_version =
this->layer_param_.contrastive_loss_param().legacy_version();
Dtype loss(0.0);
// batch reduction on the CPU; bottom[2] holds the binary pair labels
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
//LOG(INFO) << "Similar pairs have label: " << static_cast<int>(bottom[2]->cpu_data()[i]);
loss += dist_sq_.cpu_data()[i];
} else { // dissimilar pairs
if (legacy_version) {
loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
} else {
Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]),
Dtype(0.0));
loss += dist*dist;
}
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
// Backward kernel for the contrastive loss: one thread per element of
// bottom_diff (grid-stride via CUDA_KERNEL_LOOP).
//   similar pair (y[n] != 0):  gradient = alpha * (a-b)
//   dissimilar pair:           hinge gradient while inside the margin,
//                              zero otherwise
// 'alpha' already folds in the top loss weight, the 1/N normalization,
// and the +/- sign distinguishing the a- and b-bottoms (see Backward_gpu).
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
const Dtype margin, const bool legacy_version, const Dtype alpha,
const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version) {
mdist = (margin - dist_sq[n]);
beta = -alpha;
} else {
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
// 1e-4 guards against division by zero when the pair coincides
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0) {
bottom_diff[i] = beta;
} else {
// outside the margin the hinge is flat: no gradient
bottom_diff[i] = 0;
}
}
}
}
// Backward pass: propagates gradients to the two feature bottoms.
// i == 0 is the 'a' input (sign +1), i == 1 the 'b' input (sign -1);
// bottom[2] (the pair labels) receives no gradient.
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const bool legacy_version =
this->layer_param_.contrastive_loss_param().legacy_version();
const Dtype sign = (i == 0) ? 1 : -1;
// fold the top loss weight, 1/N normalization, and bottom sign into
// a single scalar consumed by the kernel
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(bottom[0]->num());
// NOLINT_NEXT_LINE(whitespace/operators)
CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, margin, legacy_version, alpha,
bottom[2]->gpu_data(), // pair similarity 0 or 1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace caffe
|
382c83dcc857b9b9d5270e1e954b0ce48f436701.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sum(int *a, int *b, int *c){
*c = *a + *b;
} | 382c83dcc857b9b9d5270e1e954b0ce48f436701.cu | #include "includes.h"
// Single-element add: stores *a + *b into *c (intended for a 1-thread launch).
__global__ void sum(int *a, int *b, int *c){
    c[0] = a[0] + b[0];
}
d47ebbfa31ffe778369574f2209dcea412c4ddf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../common/book.h"
#include "stdlib.h"
#define delta(X,Y) ((X == Y) ? 1 : 0)
#define ceilDiv(X, Y) (((X) + (Y) - 1) / (Y))
#define max2(A,B) ((A) > (B) ? (A) : (B))
#define max4(A,B,C,D) (max2((A) , max2( (B) , max2( (C) , (D)))))
extern "C"{
#include "unixtimer.h"
}
/*
 * Read one '\n'-terminated line from 'file' into a freshly allocated,
 * NUL-terminated string. 'size' is the initial capacity, grown by doubling.
 * Returns NULL on allocation failure (no memory is leaked); the caller
 * owns and must free() the result.
 *
 * Bug fix: the original wrote realloc's result straight back into the only
 * pointer, leaking the buffer when realloc failed.
 */
char * readFragment(FILE * file, size_t size){
    char * str;
    int c;
    size_t length = 0;
    if (size == 0) size = 1;                 /* guarantee room for the NUL */
    str = (char *) malloc(sizeof(char) * size);
    if (!str) return NULL;
    while ((c = fgetc(file)) != EOF && c != '\n') {
        str[length++] = (char) c;
        if (length == size) {
            /* grow through a temporary so 'str' is not lost on failure */
            char * grown = (char *) realloc(str, sizeof(char) * (size += size));
            if (!grown) { free(str); return NULL; }
            str = grown;
        }
    }
    str[length++] = '\0';
    /* shrink to fit; on failure keep the larger-but-valid buffer */
    char * shrunk = (char *) realloc(str, sizeof(char) * length);
    return shrunk ? shrunk : str;
}
// Per-window work descriptor consumed by alignKernel (one per thread block).
typedef struct kernelData{
int start; // first index of this window within the long fragment y
int end; // one past the last index of the window within y
int windowLength; // nominal window length -- NOTE(review): not read in the visible code, confirm
int xlength; // length of the short fragment x
int * matrix; // pre-allocated DP scoring matrix for this window -- TODO confirm sizing at the allocation site
}kData;
// Per-window alignment result filled in by alignKernel.
typedef struct resultData{
char * cigar; // uncompressed CIGAR (one char per op), built in reverse during traceback
int score; // best local-alignment score found in the window
int location; // alignment start position within y (1-based, per the kernel's +1)
}result;
// Device-side strcpy: copies src (including the terminating NUL) into dest
// and returns dest, mirroring the C library contract.
__device__ char * my_strcpy(char *dest, const char *src){
    char *out = dest;
    while ((*out++ = *src++) != 0) { }
    return dest;
}
// Device-side strcat: appends src after the existing contents of dest
// and returns dest.
__device__ char * my_strcat(char *dest, const char *src){
    char *end = dest;
    while (*end != 0) end++;
    my_strcpy(end, src);
    return dest;
}
/*
 * Run-length-compress a CIGAR string, walking from the last character to the
 * first (the kernel builds the uncompressed string in reverse, so this also
 * restores natural order). Runs of >= 2 identical ops become "<count><op>";
 * single ops are copied through. Caller frees the returned string.
 *
 * Bug fixes vs. the original: the loop stopped at index 1, silently dropping
 * a lone op at index 0; the output buffer had no room for the NUL when every
 * op was distinct; and single ops were appended via strcat from a 1-byte,
 * non-NUL-terminated buffer (heap over-read).
 */
char * compressCigar(char * uncompressedCigar){
    int length = (int) strlen(uncompressedCigar);
    /* worst case the output is as long as the input (all distinct), +1 for NUL */
    char * compressedCigar = (char*) calloc((size_t) length + 1, sizeof(char));
    if (!compressedCigar) return NULL;
    int start = length - 1;
    while (start >= 0) {
        if (start > 0 && uncompressedCigar[start] == uncompressedCigar[start-1]) {
            /* run of identical ops: emit "<count><op>" */
            int count = 1;
            while (start > 0 && uncompressedCigar[start] == uncompressedCigar[start-1]) {
                count++;
                start--;
            }
            char buf[16];
            sprintf(buf, "%d%c", count, uncompressedCigar[start]);
            strcat(compressedCigar, buf);
        } else {
            /* single op: append directly, keeping the buffer NUL-terminated */
            size_t len = strlen(compressedCigar);
            compressedCigar[len] = uncompressedCigar[start];
            compressedCigar[len+1] = '\0';
        }
        start--;
    }
    return compressedCigar;
}
// Smith-Waterman-style local alignment of fragment x against one window of y
// per thread block: block blockIdx.x processes data[blockIdx.x]. No threadIdx
// is used, so each block runs this body once -- parallelism is across windows.
__global__ void alignKernel(char * x, char * y, kData* data, result * results){
int id = blockIdx.x;
int start = data[id].start;
int end = data[id].end;
int length = end-start;
int n = data[id].xlength;
int * device_matrix = data[id].matrix;
int max = 0, innerX, innerY;
// zero the first column and first row (local alignment boundary)
for(int i = 0; i <= n; i++){
device_matrix[i * length + 0] = 0;
}
for(int j = 0; j <= length; j++){
device_matrix[0 * length + j] = 0;
}
// NOTE(review): rows are addressed with stride 'length' while j runs up to
// 'length' inclusive, so consecutive rows overlap by one cell -- the stride
// presumably should be (length+1); confirm against the matrix allocation.
// DP fill: match scored via delta(), gaps cost 1, scores floored at 0;
// track the best-scoring cell for the traceback start.
for(int i = 1; i <= n; i++){
for(int j = 1; j <= length; j++){
int val = max4(0, device_matrix[(i-1)*length+j] -1, device_matrix[i*length+ (j-1)] -1, device_matrix[((i-1)*length+(j-1))] + delta(x[i-1], y[start+j-1]));
device_matrix[i*length+j] = val;
if(val > max){
max = val;
innerX = i;
innerY = j;
}
}
}
// Traceback from the best cell: append one op per step to the CIGAR
// (built in reverse; host code reverses it in compressCigar).
int xCord = innerX, yCord = innerY;
result myResults = results[id];
// NOTE(review): the guard tests yCord twice; one test was presumably meant
// to be xCord > 0 -- confirm.
while(device_matrix[xCord* length + yCord] > 0 && (yCord > 0 && yCord > 0)){
if(device_matrix[xCord* length +yCord] == device_matrix[(xCord-1)* length +(yCord-1)] + delta(x[xCord-1], y[start + yCord-1])){
my_strcat(myResults.cigar, "M");
xCord--;
yCord--;
}else{
if(device_matrix[xCord* length + yCord] == device_matrix[(xCord-1) * length + yCord] - 1){
my_strcat(myResults.cigar, "I");
xCord--;
}else if(device_matrix[xCord * length + yCord] == device_matrix[xCord * length +(yCord-1)] -1){
my_strcat(myResults.cigar, "D");
yCord--;
}
}
}
// report alignment start (1-based within y) and the best score
myResults.location = yCord + start + 1;
myResults.score = max;
results[id] = myResults;
return;
}
// Print the command-line usage banner to stderr.
void print_usage(char * cmd){
    static const char * flags[] = {
        "[-threads] ",
        "[-overlap] ",
        "[-largefile] ",
        "[-smallfile] ",
        "[-windowsize] \n"
    };
    fprintf(stderr, "Usage: %s ", cmd);
    for (size_t i = 0; i < sizeof(flags) / sizeof(flags[0]); ++i)
        fprintf(stderr, "%s", flags[i]);
}
int main(int argc, char * argv[]){
FILE * xFile = stdin, * yFile = stdin;
int numThreads = 16, windowSize = 0, overlap = 0;
for(int i = 1; i < argc; i++){
if(!strncmp(argv[i], "-t", strlen("-t"))){
int userInput = atoi(argv[++i]);
if(userInput < 16){
printf("Invalid thread size entered. Using default thread number: %d\n", numThreads);
}else{
numThreads = userInput;
}
}else if(!strncmp(argv[i], "-o", strlen("-o"))){
overlap = atoi(argv[++i]);
}else if(!strncmp(argv[i], "-w", strlen("-w"))){
windowSize = atoi(argv[++i]);
}else if(!strncmp(argv[i], "-s", strlen("-s"))){
xFile =fopen(argv[++i], "r+");
}else if(!strncmp(argv[i], "-l", strlen("-l"))){
yFile = fopen(argv[++i], "r++");
}else{
print_usage(argv[0]);
}
}
if(xFile == stdin)
printf("Please enter the smaller fragment: ");
char * xFragment;
xFragment = readFragment(xFile, 256);
if(yFile == stdin)
printf("Please enter the larger fragment: ");
char * yFragment;
yFragment = readFragment(yFile, 2048);
int lenX = strlen(xFragment), lenY = strlen(yFragment);
if(overlap == 0)
overlap = lenX;
if(windowSize == 0)
windowSize = lenX * 3;
int nWindows = ceilDiv(lenY, windowSize);
char * x, *y;
HANDLE_ERROR(hipMalloc((void**) &x, sizeof(char) * lenX));
HANDLE_ERROR(hipMalloc((void**) &y, sizeof(char) * lenY));
HANDLE_ERROR(hipMemcpy(x, xFragment, sizeof(char) * lenX, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(y, yFragment, sizeof(char) * lenY, hipMemcpyHostToDevice));
kData* host_data = (kData*) malloc(sizeof(kData) * nWindows);
kData* device_data;
HANDLE_ERROR(hipMalloc((void**) &device_data, sizeof(kData) * nWindows));
//We now have our initialized data;
result * host_results = (result *) malloc(sizeof(result) * nWindows);
result * device_results;
HANDLE_ERROR(hipMalloc((void**) &device_results, sizeof(result) * nWindows));
//Initialized result structs
char * cigs[nWindows];
for(int i = 0; i < nWindows; i++){
int start = 0;
if(i == 0)
start = 0;
else
start = host_data[i-1].start - overlap + windowSize;
host_data[i].start = start;
int end = start + windowSize;
end = (end > lenY ? lenY : end);
host_data[i].end = end;
host_data[i].xlength = lenX;
host_data[i].windowLength = windowSize;
cigs[i] = (char *) malloc(sizeof(char) * lenX * 2);
HANDLE_ERROR(hipMalloc(&(host_results[i].cigar), sizeof(char) * lenX * 2));
HANDLE_ERROR(hipMalloc(&(host_data[i].matrix), sizeof(int) * (lenX +1) * (windowSize + 1)));
}
HANDLE_ERROR(hipMemcpy(device_data, host_data, sizeof(kData) * nWindows, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(device_results, host_results, sizeof(result) * nWindows, hipMemcpyHostToDevice));
int NITER = 10;
start_timer();
for(int i = 0; i < NITER; i++){
hipLaunchKernelGGL(( alignKernel), dim3(nWindows), dim3(1), 0, 0, x, y, device_data, device_results);
HANDLE_ERROR(hipDeviceSynchronize());
}
fprintf(stderr, "Average kernel time for %d iterations: %lf\n", NITER, NITER/cpu_seconds());
HANDLE_ERROR(hipMemcpy(host_results, device_results, sizeof(result) * nWindows, hipMemcpyDeviceToHost));
for(int i = 0; i < nWindows; i++){
HANDLE_ERROR(hipMemcpy(cigs[i], host_results[i].cigar, sizeof(char) * lenX * 2, hipMemcpyDeviceToHost));
}
int overallMax = 0, location = 0, index = 0;
for(int i = 0; i < nWindows; i++){
if(overallMax < host_results[i].score){
overallMax = host_results[i].score;
location = host_results[i].location;
index = i;
}
}
//char * compressed = compressCigar(cigs[index]);
printf("Best alignment found at %d :\n", location);
for(int i = 0; i < nWindows; i++){
free(cigs[i]);
HANDLE_ERROR(hipFree(host_results[i].cigar));
HANDLE_ERROR(hipFree(host_data[i].matrix));
}
free(host_results);
free(host_data);
HANDLE_ERROR(hipFree(x));
HANDLE_ERROR(hipFree(y));
HANDLE_ERROR(hipFree(device_data));
HANDLE_ERROR(hipFree(device_results));
return 0;
}
| d47ebbfa31ffe778369574f2209dcea412c4ddf8.cu | #include "../../common/book.h"
#include "stdlib.h"
#define delta(X,Y) ((X == Y) ? 1 : 0)
#define ceilDiv(X, Y) (((X) + (Y) - 1) / (Y))
#define max2(A,B) ((A) > (B) ? (A) : (B))
#define max4(A,B,C,D) (max2((A) , max2( (B) , max2( (C) , (D)))))
extern "C"{
#include "unixtimer.h"
}
char * readFragment(FILE * file, size_t size){
char * str;
int c;
size_t length = 0;
str = (char *) realloc(NULL, sizeof(char) * size);
if(!str)return str;
while( (c = fgetc(file)) != EOF && c != '\n'){
str[length++] = c;
if(length == size){
str = (char*) realloc(str, sizeof(char) * (size+=size));
if(!str)return str;
}
}
str[length++] = '\0';
return (char *)realloc(str, sizeof(char) * length);
}
typedef struct kernelData{
int start;
int end;
int windowLength;
int xlength;
int * matrix;
}kData;
typedef struct resultData{
char * cigar;
int score;
int location;
}result;
__device__ char * my_strcpy(char *dest, const char *src){
int i = 0;
do {
dest[i] = src[i];}
while (src[i++] != 0);
return dest;
}
__device__ char * my_strcat(char *dest, const char *src){
int i = 0;
while (dest[i] != 0) i++;
my_strcpy(dest+i, src);
return dest;
}
char * compressCigar(char * uncompressedCigar){
int length = strlen(uncompressedCigar);
char * compressedCigar = (char*) calloc(sizeof(char), length);
int start = length-1;
while(start > 0){
if(uncompressedCigar[start] == uncompressedCigar[start-1]){
int count = 1;
while(uncompressedCigar[start] == uncompressedCigar[start-1]){
count++;
start--;
}
char buf[15];
sprintf(buf, "%d%c", count, uncompressedCigar[start]);
strcat(compressedCigar, buf);
}else{
char * buf = (char*) calloc(sizeof(char), 1);
*buf = uncompressedCigar[start];
strcat(compressedCigar, buf);
free(buf);
}
start--;
}
return compressedCigar;
}
__global__ void alignKernel(char * x, char * y, kData* data, result * results){
int id = blockIdx.x;
int start = data[id].start;
int end = data[id].end;
int length = end-start;
int n = data[id].xlength;
int * device_matrix = data[id].matrix;
int max = 0, innerX, innerY;
for(int i = 0; i <= n; i++){
device_matrix[i * length + 0] = 0;
}
for(int j = 0; j <= length; j++){
device_matrix[0 * length + j] = 0;
}
for(int i = 1; i <= n; i++){
for(int j = 1; j <= length; j++){
int val = max4(0, device_matrix[(i-1)*length+j] -1, device_matrix[i*length+ (j-1)] -1, device_matrix[((i-1)*length+(j-1))] + delta(x[i-1], y[start+j-1]));
device_matrix[i*length+j] = val;
if(val > max){
max = val;
innerX = i;
innerY = j;
}
}
}
int xCord = innerX, yCord = innerY;
result myResults = results[id];
while(device_matrix[xCord* length + yCord] > 0 && (yCord > 0 && yCord > 0)){
if(device_matrix[xCord* length +yCord] == device_matrix[(xCord-1)* length +(yCord-1)] + delta(x[xCord-1], y[start + yCord-1])){
my_strcat(myResults.cigar, "M");
xCord--;
yCord--;
}else{
if(device_matrix[xCord* length + yCord] == device_matrix[(xCord-1) * length + yCord] - 1){
my_strcat(myResults.cigar, "I");
xCord--;
}else if(device_matrix[xCord * length + yCord] == device_matrix[xCord * length +(yCord-1)] -1){
my_strcat(myResults.cigar, "D");
yCord--;
}
}
}
myResults.location = yCord + start + 1;
myResults.score = max;
results[id] = myResults;
return;
}
void print_usage(char * cmd){
fprintf(stderr, "Usage: %s ", cmd);
fprintf(stderr, "[-threads] ");
fprintf(stderr, "[-overlap] ");
fprintf(stderr, "[-largefile] ");
fprintf(stderr, "[-smallfile] ");
fprintf(stderr, "[-windowsize] \n");
}
int main(int argc, char * argv[]){
FILE * xFile = stdin, * yFile = stdin;
int numThreads = 16, windowSize = 0, overlap = 0;
for(int i = 1; i < argc; i++){
if(!strncmp(argv[i], "-t", strlen("-t"))){
int userInput = atoi(argv[++i]);
if(userInput < 16){
printf("Invalid thread size entered. Using default thread number: %d\n", numThreads);
}else{
numThreads = userInput;
}
}else if(!strncmp(argv[i], "-o", strlen("-o"))){
overlap = atoi(argv[++i]);
}else if(!strncmp(argv[i], "-w", strlen("-w"))){
windowSize = atoi(argv[++i]);
}else if(!strncmp(argv[i], "-s", strlen("-s"))){
xFile =fopen(argv[++i], "r+");
}else if(!strncmp(argv[i], "-l", strlen("-l"))){
yFile = fopen(argv[++i], "r++");
}else{
print_usage(argv[0]);
}
}
if(xFile == stdin)
printf("Please enter the smaller fragment: ");
char * xFragment;
xFragment = readFragment(xFile, 256);
if(yFile == stdin)
printf("Please enter the larger fragment: ");
char * yFragment;
yFragment = readFragment(yFile, 2048);
int lenX = strlen(xFragment), lenY = strlen(yFragment);
if(overlap == 0)
overlap = lenX;
if(windowSize == 0)
windowSize = lenX * 3;
int nWindows = ceilDiv(lenY, windowSize);
char * x, *y;
HANDLE_ERROR(cudaMalloc((void**) &x, sizeof(char) * lenX));
HANDLE_ERROR(cudaMalloc((void**) &y, sizeof(char) * lenY));
HANDLE_ERROR(cudaMemcpy(x, xFragment, sizeof(char) * lenX, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(y, yFragment, sizeof(char) * lenY, cudaMemcpyHostToDevice));
kData* host_data = (kData*) malloc(sizeof(kData) * nWindows);
kData* device_data;
HANDLE_ERROR(cudaMalloc((void**) &device_data, sizeof(kData) * nWindows));
//We now have our initialized data;
result * host_results = (result *) malloc(sizeof(result) * nWindows);
result * device_results;
HANDLE_ERROR(cudaMalloc((void**) &device_results, sizeof(result) * nWindows));
//Initialized result structs
char * cigs[nWindows];
for(int i = 0; i < nWindows; i++){
int start = 0;
if(i == 0)
start = 0;
else
start = host_data[i-1].start - overlap + windowSize;
host_data[i].start = start;
int end = start + windowSize;
end = (end > lenY ? lenY : end);
host_data[i].end = end;
host_data[i].xlength = lenX;
host_data[i].windowLength = windowSize;
cigs[i] = (char *) malloc(sizeof(char) * lenX * 2);
HANDLE_ERROR(cudaMalloc(&(host_results[i].cigar), sizeof(char) * lenX * 2));
HANDLE_ERROR(cudaMalloc(&(host_data[i].matrix), sizeof(int) * (lenX +1) * (windowSize + 1)));
}
HANDLE_ERROR(cudaMemcpy(device_data, host_data, sizeof(kData) * nWindows, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(device_results, host_results, sizeof(result) * nWindows, cudaMemcpyHostToDevice));
int NITER = 10;
start_timer();
for(int i = 0; i < NITER; i++){
alignKernel<<<nWindows, 1>>>(x, y, device_data, device_results);
HANDLE_ERROR(cudaDeviceSynchronize());
}
fprintf(stderr, "Average kernel time for %d iterations: %lf\n", NITER, NITER/cpu_seconds());
HANDLE_ERROR(cudaMemcpy(host_results, device_results, sizeof(result) * nWindows, cudaMemcpyDeviceToHost));
for(int i = 0; i < nWindows; i++){
HANDLE_ERROR(cudaMemcpy(cigs[i], host_results[i].cigar, sizeof(char) * lenX * 2, cudaMemcpyDeviceToHost));
}
int overallMax = 0, location = 0, index = 0;
for(int i = 0; i < nWindows; i++){
if(overallMax < host_results[i].score){
overallMax = host_results[i].score;
location = host_results[i].location;
index = i;
}
}
//char * compressed = compressCigar(cigs[index]);
printf("Best alignment found at %d :\n", location);
for(int i = 0; i < nWindows; i++){
free(cigs[i]);
HANDLE_ERROR(cudaFree(host_results[i].cigar));
HANDLE_ERROR(cudaFree(host_data[i].matrix));
}
free(host_results);
free(host_data);
HANDLE_ERROR(cudaFree(x));
HANDLE_ERROR(cudaFree(y));
HANDLE_ERROR(cudaFree(device_data));
HANDLE_ERROR(cudaFree(device_results));
return 0;
}
|
095a287cfd45813bfcd890f79ae3dfc1f9c13a2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <rocblas.h>
#include <time.h>
#define HEIGHT 1024
#define WIDTH 1024
#define BLOCK_SIZE 32
__global__ void matrix_mult(int *a, int *b, int *c){
int threadRow = threadIdx.y;
int threadCol = threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int c_val = 0;
for (int i = 0; i<(WIDTH/BLOCK_SIZE); i++) {
__shared__ int a_share[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int b_share[BLOCK_SIZE][BLOCK_SIZE];
// each thread reads one element from both A and B matrices into the shared sub-matrices
a_share[threadRow][threadCol] = a[row * WIDTH + col];
b_share[threadRow][threadCol] = b[row * WIDTH + col];
// make sure the sub-matrices are loaded before starting the computation
__syncthreads();
for (int i=0; i<BLOCK_SIZE; i++) {
c_val += a_share[threadRow][i] * b_share[i][threadCol];
}
// make sure every thread is done computing before loading new sub-matrices
__syncthreads();
}
c[row * WIDTH + col] = c_val;
}
int main(){
int i;
int *a = (int*)malloc(sizeof(int) * HEIGHT * WIDTH);
int *b = (int*)malloc(sizeof(int) * HEIGHT * WIDTH);
int *c = (int*)malloc(sizeof(int) * HEIGHT * WIDTH);
for(i=0; i<WIDTH * HEIGHT; i++){
a[i]=1;
b[i]=2;
}
int *gpu_a, *gpu_b, *gpu_c;
hipMalloc((void**)&gpu_a, sizeof(int) * HEIGHT * WIDTH);
hipMalloc((void**)&gpu_b, sizeof(int) * HEIGHT * WIDTH);
hipMalloc((void**)&gpu_c, sizeof(int) * HEIGHT * WIDTH);
struct timespec start, stop;
double time;
hipMemcpy(gpu_a, a, sizeof(int) * HEIGHT * WIDTH, hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, sizeof(int) * HEIGHT * WIDTH, hipMemcpyHostToDevice);
dim3 dimGrid(32, 32);
dim3 dimBlock(32, 32);
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" );}
hipLaunchKernelGGL(( matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_a, gpu_b, gpu_c);
hipMemcpy(c, gpu_c, sizeof(int) * HEIGHT * WIDTH, hipMemcpyDeviceToHost);
if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" );}
time = (stop.tv_sec - start.tv_sec)+ (double)(stop.tv_nsec - start.tv_nsec)/1e9;
printf("time is %f ns\n", time*1e9);
printf("c[451][451]=%d\n", c[451*1024+451]);
free(a);
free(b);
free(c);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_c);
return 0;
}
| 095a287cfd45813bfcd890f79ae3dfc1f9c13a2b.cu | #include <stdlib.h>
#include <stdio.h>
#include <cublas.h>
#include <time.h>
#define HEIGHT 1024
#define WIDTH 1024
#define BLOCK_SIZE 32
__global__ void matrix_mult(int *a, int *b, int *c){
int threadRow = threadIdx.y;
int threadCol = threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int c_val = 0;
for (int i = 0; i<(WIDTH/BLOCK_SIZE); i++) {
__shared__ int a_share[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int b_share[BLOCK_SIZE][BLOCK_SIZE];
// each thread reads one element from both A and B matrices into the shared sub-matrices
a_share[threadRow][threadCol] = a[row * WIDTH + col];
b_share[threadRow][threadCol] = b[row * WIDTH + col];
// make sure the sub-matrices are loaded before starting the computation
__syncthreads();
for (int i=0; i<BLOCK_SIZE; i++) {
c_val += a_share[threadRow][i] * b_share[i][threadCol];
}
// make sure every thread is done computing before loading new sub-matrices
__syncthreads();
}
c[row * WIDTH + col] = c_val;
}
int main(){
int i;
int *a = (int*)malloc(sizeof(int) * HEIGHT * WIDTH);
int *b = (int*)malloc(sizeof(int) * HEIGHT * WIDTH);
int *c = (int*)malloc(sizeof(int) * HEIGHT * WIDTH);
for(i=0; i<WIDTH * HEIGHT; i++){
a[i]=1;
b[i]=2;
}
int *gpu_a, *gpu_b, *gpu_c;
cudaMalloc((void**)&gpu_a, sizeof(int) * HEIGHT * WIDTH);
cudaMalloc((void**)&gpu_b, sizeof(int) * HEIGHT * WIDTH);
cudaMalloc((void**)&gpu_c, sizeof(int) * HEIGHT * WIDTH);
struct timespec start, stop;
double time;
cudaMemcpy(gpu_a, a, sizeof(int) * HEIGHT * WIDTH, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, sizeof(int) * HEIGHT * WIDTH, cudaMemcpyHostToDevice);
dim3 dimGrid(32, 32);
dim3 dimBlock(32, 32);
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" );}
matrix_mult<<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c);
cudaMemcpy(c, gpu_c, sizeof(int) * HEIGHT * WIDTH, cudaMemcpyDeviceToHost);
if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" );}
time = (stop.tv_sec - start.tv_sec)+ (double)(stop.tv_nsec - start.tv_nsec)/1e9;
printf("time is %f ns\n", time*1e9);
printf("c[451][451]=%d\n", c[451*1024+451]);
free(a);
free(b);
free(c);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_c);
return 0;
}
|
ce42c146373d1395793210a699985aba02aaf95a.hip | // !!! This is a file automatically generated by hipify!!!
// Solve the Laplace equation on a 2D lattice with boundary conditions.
//
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// field variables
float* h_new; // host field vectors
float* h_old;
int MAX=1000000; // maximum iterations
double eps=1.0e-10; // stopping criterion
int main(void)
{
printf("Solve Laplace equation on a 2D lattice with boundary conditions\n");
int Nx,Ny; // lattice size
printf("Enter the size of the square lattice: ");
scanf("%d %d",&Nx,&Ny);
printf("%d %d\n",Nx,Ny);
int size = Nx*Ny*sizeof(float);
h_new = (float*)malloc(size);
h_old = (float*)malloc(size);
memset(h_old, 0, size);
memset(h_new, 0, size);
// for(int j=0;j<Ny;j++)
// for(int i=0;i<Nx;i++)
// h_new[i+j*Nx]=0.0;
// Initialize the field vector with boundary conditions
for(int x=0; x<Nx; x++) {
h_new[x+Nx*(Ny-1)]=1.0;
h_old[x+Nx*(Ny-1)]=1.0;
h_new[x+Nx*(0)]=5.0;
h_new[x+Nx*(0)]=5.0;
}
for(int y=0; y<Ny; y++) {
h_new[0+Nx*(y)]=-1.0;
h_old[0+Nx*(y)]=-1.0;
h_new[Nx-1+Nx*(y)]=-2.0;
h_new[Nx-1+Nx*(y)]=-2.0;
}
FILE *out1; // save initial configuration in phi_initial_Tex.dat
out1 = fopen("phi_initial.dat","w");
fprintf(out1, "Inital field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(out1,"%.2e ",h_new[i+j*Nx]);
}
fprintf(out1,"\n");
}
fclose(out1);
printf("\n");
printf("Inital field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
printf("%.2e ",h_new[i+j*Nx]);
}
printf("\n");
}
printf("\n");
// create the timer
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//start the timer
hipEventRecord(start,0);
// to compute the reference solution
double error = 10*eps; // any value bigger eps is OK
int iter = 0; // counter for iterations
volatile bool flag = true;
float t, l, r, b; // top, left, right, bottom
double diff;
int site, ym1, xm1, xp1, yp1;
while ( (error > eps) && (iter < MAX) ) {
if(flag) {
error = 0.0;
for(int y=0; y<Ny; y++) {
for(int x=0; x<Nx; x++) {
if(x==0 || x==Nx-1 || y==0 || y==Ny-1) {
}
else {
site = x+y*Nx;
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
b = h_old[ym1];
l = h_old[xm1];
r = h_old[xp1];
t = h_old[yp1];
h_new[site] = 0.25*(b+l+r+t);
diff = h_new[site]-h_old[site];
error = error + diff*diff;
}
}
}
}
else {
error = 0.0;
for(int y=0; y<Ny; y++) {
for(int x=0; x<Nx; x++) {
if(x==0 || x==Nx-1 || y==0 || y==Ny-1) {
}
else {
site = x+y*Nx;
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
b = h_new[ym1];
l = h_new[xm1];
r = h_new[xp1];
t = h_new[yp1];
h_old[site] = 0.25*(b+l+r+t);
diff = h_new[site]-h_old[site];
error = error + diff*diff;
}
}
}
}
flag = !flag;
iter++;
error = sqrt(error);
// printf("error = %.15e\n",error);
// printf("iteration = %d\n",iter);
} // exit if error < eps
printf("error = %.15e\n",error);
printf("total iterations = %d\n",iter);
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float cputime;
hipEventElapsedTime( &cputime, start, stop);
printf("Processing time for CPU: %f (ms) \n",cputime);
double flops = 7.0*(Nx-2)*(Ny-2)*iter;
printf("CPU Gflops: %lf\n",flops/(1000000.0*cputime));
// destroy the timer
hipEventDestroy(start);
hipEventDestroy(stop);
FILE *outc; // save final configuration in phi_CPU.dat
outc = fopen("phi_CPU.dat","w");
fprintf(outc,"Final field configuration (CPU):\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(outc,"%.2e ",h_new[i+j*Nx]);
}
fprintf(outc,"\n");
}
fclose(outc);
printf("\n");
printf("Final field configuration (CPU):\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
printf("%.2e ",h_new[i+j*Nx]);
}
printf("\n");
}
free(h_new);
free(h_old);
}
| ce42c146373d1395793210a699985aba02aaf95a.cu | // Solve the Laplace equation on a 2D lattice with boundary conditions.
//
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// field variables
float* h_new; // host field vectors
float* h_old;
int MAX=1000000; // maximum iterations
double eps=1.0e-10; // stopping criterion
int main(void)
{
printf("Solve Laplace equation on a 2D lattice with boundary conditions\n");
int Nx,Ny; // lattice size
printf("Enter the size of the square lattice: ");
scanf("%d %d",&Nx,&Ny);
printf("%d %d\n",Nx,Ny);
int size = Nx*Ny*sizeof(float);
h_new = (float*)malloc(size);
h_old = (float*)malloc(size);
memset(h_old, 0, size);
memset(h_new, 0, size);
// for(int j=0;j<Ny;j++)
// for(int i=0;i<Nx;i++)
// h_new[i+j*Nx]=0.0;
// Initialize the field vector with boundary conditions
for(int x=0; x<Nx; x++) {
h_new[x+Nx*(Ny-1)]=1.0;
h_old[x+Nx*(Ny-1)]=1.0;
h_new[x+Nx*(0)]=5.0;
h_new[x+Nx*(0)]=5.0;
}
for(int y=0; y<Ny; y++) {
h_new[0+Nx*(y)]=-1.0;
h_old[0+Nx*(y)]=-1.0;
h_new[Nx-1+Nx*(y)]=-2.0;
h_new[Nx-1+Nx*(y)]=-2.0;
}
FILE *out1; // save initial configuration in phi_initial_Tex.dat
out1 = fopen("phi_initial.dat","w");
fprintf(out1, "Inital field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(out1,"%.2e ",h_new[i+j*Nx]);
}
fprintf(out1,"\n");
}
fclose(out1);
printf("\n");
printf("Inital field configuration:\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
printf("%.2e ",h_new[i+j*Nx]);
}
printf("\n");
}
printf("\n");
// create the timer
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//start the timer
cudaEventRecord(start,0);
// to compute the reference solution
double error = 10*eps; // any value bigger eps is OK
int iter = 0; // counter for iterations
volatile bool flag = true;
float t, l, r, b; // top, left, right, bottom
double diff;
int site, ym1, xm1, xp1, yp1;
while ( (error > eps) && (iter < MAX) ) {
if(flag) {
error = 0.0;
for(int y=0; y<Ny; y++) {
for(int x=0; x<Nx; x++) {
if(x==0 || x==Nx-1 || y==0 || y==Ny-1) {
}
else {
site = x+y*Nx;
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
b = h_old[ym1];
l = h_old[xm1];
r = h_old[xp1];
t = h_old[yp1];
h_new[site] = 0.25*(b+l+r+t);
diff = h_new[site]-h_old[site];
error = error + diff*diff;
}
}
}
}
else {
error = 0.0;
for(int y=0; y<Ny; y++) {
for(int x=0; x<Nx; x++) {
if(x==0 || x==Nx-1 || y==0 || y==Ny-1) {
}
else {
site = x+y*Nx;
xm1 = site - 1; // x-1
xp1 = site + 1; // x+1
ym1 = site - Nx; // y-1
yp1 = site + Nx; // y+1
b = h_new[ym1];
l = h_new[xm1];
r = h_new[xp1];
t = h_new[yp1];
h_old[site] = 0.25*(b+l+r+t);
diff = h_new[site]-h_old[site];
error = error + diff*diff;
}
}
}
}
flag = !flag;
iter++;
error = sqrt(error);
// printf("error = %.15e\n",error);
// printf("iteration = %d\n",iter);
} // exit if error < eps
printf("error = %.15e\n",error);
printf("total iterations = %d\n",iter);
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float cputime;
cudaEventElapsedTime( &cputime, start, stop);
printf("Processing time for CPU: %f (ms) \n",cputime);
double flops = 7.0*(Nx-2)*(Ny-2)*iter;
printf("CPU Gflops: %lf\n",flops/(1000000.0*cputime));
// destroy the timer
cudaEventDestroy(start);
cudaEventDestroy(stop);
FILE *outc; // save final configuration in phi_CPU.dat
outc = fopen("phi_CPU.dat","w");
fprintf(outc,"Final field configuration (CPU):\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
fprintf(outc,"%.2e ",h_new[i+j*Nx]);
}
fprintf(outc,"\n");
}
fclose(outc);
printf("\n");
printf("Final field configuration (CPU):\n");
for(int j=Ny-1;j>-1;j--) {
for(int i=0; i<Nx; i++) {
printf("%.2e ",h_new[i+j*Nx]);
}
printf("\n");
}
free(h_new);
free(h_old);
}
|
004a87a58ccd20d374dc9caab0bb916c5e4cd540.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* ogreen@gatech.edu
* @date August, 2017
* @version v2
*
* @copyright Copyright 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*/
#include "Static/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
using length_t = int;
namespace hornets_nest {
/// TODO - changed hostKatzdata to pointer so that I can try to inherit it in
// the streaming case.
KatzCentrality::KatzCentrality(HornetGraph& hornet, int max_iteration, int K,
int max_degree, bool is_static) :
StaticAlgorithm(hornet),
load_balancing(hornet),
is_static(is_static) {
if (max_iteration <= 0)
ERROR("Number of max iterations should be greater than zero")
hd_katzdata().nV = hornet.nV();
hd_katzdata().K = K;
hd_katzdata().max_degree = max_degree;
hd_katzdata().alpha = 1.0 / (static_cast<double>(max_degree) + 1.0);
hd_katzdata().max_iteration = max_iteration;
auto nV = hornet.nV();
if (is_static) {
gpu::allocate(hd_katzdata().num_paths_data, nV * 2);
hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data + nV;
hd_katzdata().num_paths = nullptr;
h_paths_ptr = nullptr;
}
else {
gpu::allocate(hd_katzdata().num_paths_data, nV * max_iteration);
gpu::allocate(hd_katzdata().num_paths, max_iteration);
host::allocate(h_paths_ptr, max_iteration);
for(int i = 0; i < max_iteration; i++)
h_paths_ptr[i] = hd_katzdata().num_paths_data + nV * i;
hd_katzdata().num_paths_prev = h_paths_ptr[0];
hd_katzdata().num_paths_curr = h_paths_ptr[1];
host::copyToDevice(h_paths_ptr, max_iteration, hd_katzdata().num_paths);
}
gpu::allocate(hd_katzdata().KC, nV);
gpu::allocate(hd_katzdata().lower_bound, nV);
gpu::allocate(hd_katzdata().upper_bound, nV);
gpu::allocate(hd_katzdata().is_active, nV);
gpu::allocate(hd_katzdata().vertex_array_sorted, nV);
gpu::allocate(hd_katzdata().vertex_array_unsorted, nV);
gpu::allocate(hd_katzdata().lower_bound_sorted, nV);
gpu::allocate(hd_katzdata().lower_bound_unsorted, nV);
reset();
}
KatzCentrality::~KatzCentrality() {
release();
}
void KatzCentrality::reset() {
hd_katzdata().iteration = 1;
if (is_static) {
hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data +
hornet.nV();
}
else {
hd_katzdata().num_paths_prev = h_paths_ptr[0];
hd_katzdata().num_paths_curr = h_paths_ptr[1];
}
}
void KatzCentrality::release(){
gpu::free(hd_katzdata().num_paths_data);
gpu::free(hd_katzdata().num_paths);
gpu::free(hd_katzdata().KC);
gpu::free(hd_katzdata().lower_bound);
gpu::free(hd_katzdata().upper_bound);
gpu::free(hd_katzdata().vertex_array_sorted);
gpu::free(hd_katzdata().vertex_array_unsorted);
gpu::free(hd_katzdata().lower_bound_sorted);
gpu::free(hd_katzdata().lower_bound_unsorted);
host::free(h_paths_ptr);
}
void KatzCentrality::run() {
forAllnumV(hornet, Init { hd_katzdata });
hd_katzdata().iteration = 1;
hd_katzdata().num_active = hornet.nV();
while (hd_katzdata().num_active > hd_katzdata().K &&
hd_katzdata().iteration < hd_katzdata().max_iteration) {
hd_katzdata().alphaI = ::pow(hd_katzdata().alpha,
hd_katzdata().iteration);
hd_katzdata().lower_bound_const = ::pow(hd_katzdata().alpha,
hd_katzdata().iteration + 1) /
(1.0 - hd_katzdata().alpha);
hd_katzdata().upper_bound_const = ::pow(hd_katzdata().alpha,
hd_katzdata().iteration + 1) /
(1.0 - hd_katzdata().alpha *
static_cast<double>(hd_katzdata().max_degree));
hd_katzdata().num_active = 0; // Each iteration the number of active
// vertices is set to zero.
forAllnumV (hornet, InitNumPathsPerIteration { hd_katzdata } );
forAllEdges(hornet, UpdatePathCount { hd_katzdata },
load_balancing);
forAllnumV (hornet, UpdateKatzAndBounds { hd_katzdata } );
hd_katzdata.sync();
hd_katzdata().iteration++;
if(is_static) {
std::swap(hd_katzdata().num_paths_curr,
hd_katzdata().num_paths_prev);
}
else {
auto iter = hd_katzdata().iteration;
hd_katzdata().num_paths_prev = h_paths_ptr[iter - 1];
hd_katzdata().num_paths_curr = h_paths_ptr[iter - 0];
}
auto old_active_count = hd_katzdata().num_active;
hd_katzdata().num_prev_active = hd_katzdata().num_active;
hd_katzdata().num_active = 0; // Resetting active vertices for
// sorting
// Notice that the sorts the vertices in an incremental order based on
// the lower bounds.
// The algorithms requires the vertices to be sorted in an decremental
// fashion.
// As such, we use the num_prev_active variables to store the number of
// previous active vertices and are able to find the K-th from last
// vertex (which is essentially going from the tail of the array).
xlib::CubSortByKey<double, vid_t>::srun
(hd_katzdata().lower_bound_unsorted,
hd_katzdata().vertex_array_unsorted,
old_active_count, hd_katzdata().lower_bound_sorted,
hd_katzdata().vertex_array_sorted);
forAllnumV(hornet, CountActive { hd_katzdata } );
hd_katzdata.sync();
}
}
void KatzCentrality::copyKCToHost(double* d) {
gpu::copyToHost(hd_katzdata().KC, hornet.nV(), d);
}
// This function should only be used directly within run() and is currently
// commented out due to to large execution overheads.
void KatzCentrality::printKMostImportant() {
ulong_t* num_paths_curr;
ulong_t* num_paths_prev;
int* vertex_array;
int* vertex_array_unsorted;
double* KC;
double* lower_bound;
double* upper_bound;
auto nV = hornet.nV();
host::allocate(num_paths_curr, nV);
host::allocate(num_paths_prev, nV);
host::allocate(vertex_array, nV);
host::allocate(vertex_array_unsorted, nV);
host::allocate(KC, nV);
host::allocate(lower_bound, nV);
host::allocate(upper_bound, nV);
gpu::copyToHost(hd_katzdata().lower_bound, nV, lower_bound);
gpu::copyToHost(hd_katzdata().upper_bound, nV, upper_bound);
gpu::copyToHost(hd_katzdata().KC, nV, KC);
gpu::copyToHost(hd_katzdata().vertex_array_sorted, nV, vertex_array);
gpu::copyToHost(hd_katzdata().vertex_array_unsorted, nV,
vertex_array_unsorted);
if (hd_katzdata().num_prev_active > hd_katzdata().K) {
for (int i = hd_katzdata().num_prev_active - 1;
i >= hd_katzdata().num_prev_active - hd_katzdata().K; i--) {
vid_t j = vertex_array[i];
std::cout << j << "\t\t" << KC[j] << "\t\t" << upper_bound[j]
<< upper_bound[j] - lower_bound[j] << "\n";
}
}
std::cout << std::endl;
host::free(num_paths_curr);
host::free(num_paths_prev);
host::free(vertex_array);
host::free(vertex_array_unsorted);
host::free(KC);
host::free(lower_bound);
host::free(upper_bound);
}
int KatzCentrality::get_iteration_count() {
return hd_katzdata().iteration;
}
// Result validation is not implemented for this algorithm; always
// reports success.
bool KatzCentrality::validate() {
return true;
}
} // namespace hornets_nest
| 004a87a58ccd20d374dc9caab0bb916c5e4cd540.cu | /**
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* ogreen@gatech.edu
* @date August, 2017
* @version v2
*
* @copyright Copyright © 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*/
#include "Static/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
using length_t = int;
namespace hornets_nest {
/// TODO - changed hostKatzdata to pointer so that I can try to inherit it in
// the streaming case.
// Builds the Katz Centrality solver and allocates all working buffers.
// @param hornet        graph to process
// @param max_iteration upper bound on iterations (must be > 0)
// @param K             number of top-ranked vertices to resolve
// @param max_degree    maximum vertex degree; alpha = 1/(max_degree+1)
// @param is_static     true: keep only two path-count buffers (ping-pong);
//                      false: keep one nV-sized buffer per iteration so the
//                      streaming variant can revisit earlier iterations.
KatzCentrality::KatzCentrality(HornetGraph& hornet, int max_iteration, int K,
int max_degree, bool is_static) :
StaticAlgorithm(hornet),
load_balancing(hornet),
is_static(is_static) {
if (max_iteration <= 0)
ERROR("Number of max iterations should be greater than zero")
// Algorithm parameters shared between host and device.
hd_katzdata().nV = hornet.nV();
hd_katzdata().K = K;
hd_katzdata().max_degree = max_degree;
hd_katzdata().alpha = 1.0 / (static_cast<double>(max_degree) + 1.0);
hd_katzdata().max_iteration = max_iteration;
auto nV = hornet.nV();
if (is_static) {
// Static mode: one device buffer split into two halves that run()
// swaps between iterations; no per-iteration pointer table needed.
gpu::allocate(hd_katzdata().num_paths_data, nV * 2);
hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data + nV;
hd_katzdata().num_paths = nullptr;
h_paths_ptr = nullptr;
}
else {
// Streaming mode: one nV-sized plane per iteration plus a device
// array of per-iteration base pointers, mirrored on the host in
// h_paths_ptr.
gpu::allocate(hd_katzdata().num_paths_data, nV * max_iteration);
gpu::allocate(hd_katzdata().num_paths, max_iteration);
host::allocate(h_paths_ptr, max_iteration);
for(int i = 0; i < max_iteration; i++)
h_paths_ptr[i] = hd_katzdata().num_paths_data + nV * i;
hd_katzdata().num_paths_prev = h_paths_ptr[0];
hd_katzdata().num_paths_curr = h_paths_ptr[1];
host::copyToDevice(h_paths_ptr, max_iteration, hd_katzdata().num_paths);
}
// Per-vertex working arrays: centrality estimate, bounds, activity flag,
// and the key/value buffers used by the per-iteration CUB sort.
gpu::allocate(hd_katzdata().KC, nV);
gpu::allocate(hd_katzdata().lower_bound, nV);
gpu::allocate(hd_katzdata().upper_bound, nV);
gpu::allocate(hd_katzdata().is_active, nV);
gpu::allocate(hd_katzdata().vertex_array_sorted, nV);
gpu::allocate(hd_katzdata().vertex_array_unsorted, nV);
gpu::allocate(hd_katzdata().lower_bound_sorted, nV);
gpu::allocate(hd_katzdata().lower_bound_unsorted, nV);
reset();
}
// Releases every buffer allocated by the constructor.
KatzCentrality::~KatzCentrality() {
release();
}
// Re-arm the algorithm for a fresh execution: restart the iteration
// counter and point the previous/current path-count buffers at their
// initial locations (the two halves of num_paths_data in static mode,
// the first two per-iteration planes otherwise).
void KatzCentrality::reset() {
    hd_katzdata().iteration = 1;
    if (!is_static) {
        hd_katzdata().num_paths_prev = h_paths_ptr[0];
        hd_katzdata().num_paths_curr = h_paths_ptr[1];
        return;
    }
    hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
    hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data + hornet.nV();
}
// Frees all device and host buffers owned by this instance.
// NOTE(review): in static mode num_paths and h_paths_ptr are nullptr —
// assumes gpu::free/host::free accept null pointers; confirm.
void KatzCentrality::release(){
gpu::free(hd_katzdata().num_paths_data);
gpu::free(hd_katzdata().num_paths);
gpu::free(hd_katzdata().KC);
gpu::free(hd_katzdata().lower_bound);
gpu::free(hd_katzdata().upper_bound);
gpu::free(hd_katzdata().vertex_array_sorted);
gpu::free(hd_katzdata().vertex_array_unsorted);
gpu::free(hd_katzdata().lower_bound_sorted);
gpu::free(hd_katzdata().lower_bound_unsorted);
host::free(h_paths_ptr);
}
// Executes Katz Centrality with bound-based pruning. Each iteration:
//  1. extends every vertex's path count by one hop (UpdatePathCount),
//  2. updates KC and its per-vertex lower/upper bounds,
//  3. sorts the vertices by lower bound (CUB sort-by-key),
//  4. counts how many vertices are still "active", i.e. whose top-K
//     membership is undecided.
// The loop stops when at most K vertices remain active or the iteration
// budget is exhausted.
void KatzCentrality::run() {
forAllnumV(hornet, Init { hd_katzdata });
hd_katzdata().iteration = 1;
hd_katzdata().num_active = hornet.nV();
while (hd_katzdata().num_active > hd_katzdata().K &&
hd_katzdata().iteration < hd_katzdata().max_iteration) {
// Per-iteration constants: alpha^i plus the alpha^(i+1)-based series
// terms used as lower/upper bound offsets.
hd_katzdata().alphaI = std::pow(hd_katzdata().alpha,
hd_katzdata().iteration);
hd_katzdata().lower_bound_const = std::pow(hd_katzdata().alpha,
hd_katzdata().iteration + 1) /
(1.0 - hd_katzdata().alpha);
hd_katzdata().upper_bound_const = std::pow(hd_katzdata().alpha,
hd_katzdata().iteration + 1) /
(1.0 - hd_katzdata().alpha *
static_cast<double>(hd_katzdata().max_degree));
hd_katzdata().num_active = 0; // Each iteration the number of active
// vertices is set to zero.
forAllnumV (hornet, InitNumPathsPerIteration { hd_katzdata } );
forAllEdges(hornet, UpdatePathCount { hd_katzdata },
load_balancing);
forAllnumV (hornet, UpdateKatzAndBounds { hd_katzdata } );
hd_katzdata.sync();
hd_katzdata().iteration++;
// Advance the path-count buffers: ping-pong in static mode, step to
// the next per-iteration plane otherwise.
if(is_static) {
std::swap(hd_katzdata().num_paths_curr,
hd_katzdata().num_paths_prev);
}
else {
auto iter = hd_katzdata().iteration;
hd_katzdata().num_paths_prev = h_paths_ptr[iter - 1];
hd_katzdata().num_paths_curr = h_paths_ptr[iter - 0];
}
auto old_active_count = hd_katzdata().num_active;
hd_katzdata().num_prev_active = hd_katzdata().num_active;
hd_katzdata().num_active = 0; // Resetting active vertices for
// sorting
// Note: CubSortByKey sorts the vertices in *increasing* lower-bound
// order, while the algorithm needs them in decreasing order. We keep
// the count in num_prev_active so the K-th vertex can be found from
// the tail of the sorted array.
xlib::CubSortByKey<double, vid_t>::srun
(hd_katzdata().lower_bound_unsorted,
hd_katzdata().vertex_array_unsorted,
old_active_count, hd_katzdata().lower_bound_sorted,
hd_katzdata().vertex_array_sorted);
forAllnumV(hornet, CountActive { hd_katzdata } );
hd_katzdata.sync();
}
}
// Copy the device-side Katz Centrality scores into the caller-provided
// host buffer `d` (must hold at least hornet.nV() doubles).
void KatzCentrality::copyKCToHost(double* d) {
    auto num_vertices = hornet.nV();
    gpu::copyToHost(hd_katzdata().KC, num_vertices, d);
}
// This function should only be used directly within run() and is currently
// commented out due to large execution overheads.
// Debug helper: print the K vertices with the highest Katz centrality
// estimates, one per line as "<vertex>  <KC>  <upper bound>  <ub - lb>".
// Copies several O(nV) arrays from device to host, hence the large
// overhead mentioned above. Fixes vs. the previous version: the missing
// "\t\t" separator between the upper bound and the (ub - lb) column, and
// removal of the ulong_t num_paths scratch buffers that were allocated
// and freed without ever being read or written.
void KatzCentrality::printKMostImportant() {
    int*    vertex_array;
    int*    vertex_array_unsorted;
    double* KC;
    double* lower_bound;
    double* upper_bound;
    auto nV = hornet.nV();
    host::allocate(vertex_array, nV);
    host::allocate(vertex_array_unsorted, nV);
    host::allocate(KC, nV);
    host::allocate(lower_bound, nV);
    host::allocate(upper_bound, nV);
    gpu::copyToHost(hd_katzdata().lower_bound, nV, lower_bound);
    gpu::copyToHost(hd_katzdata().upper_bound, nV, upper_bound);
    gpu::copyToHost(hd_katzdata().KC, nV, KC);
    gpu::copyToHost(hd_katzdata().vertex_array_sorted, nV, vertex_array);
    gpu::copyToHost(hd_katzdata().vertex_array_unsorted, nV,
                    vertex_array_unsorted);
    // The sorted array is in increasing lower-bound order, so the top-K
    // vertices are read backwards from the tail of the previously-active
    // range.
    if (hd_katzdata().num_prev_active > hd_katzdata().K) {
        for (int i = hd_katzdata().num_prev_active - 1;
             i >= hd_katzdata().num_prev_active - hd_katzdata().K; i--) {
            vid_t j = vertex_array[i];
            std::cout << j << "\t\t" << KC[j] << "\t\t" << upper_bound[j]
                      << "\t\t" << upper_bound[j] - lower_bound[j] << "\n";
        }
    }
    std::cout << std::endl;
    host::free(vertex_array);
    host::free(vertex_array_unsorted);
    host::free(KC);
    host::free(lower_bound);
    host::free(upper_bound);
}
// Return the iteration counter as left by the most recent run().
int KatzCentrality::get_iteration_count() {
return hd_katzdata().iteration;
}
// Result validation is not implemented for this algorithm; always
// reports success.
bool KatzCentrality::validate() {
return true;
}
} // namespace hornets_nest
|
85b63f09cedd4c988a5f8168a9d41166b869bdb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Forward pass of a max-pooling layer: one thread per output element.
//   n       : total number of output elements (batch * channels * out_h * out_w)
//   in_h/in_w/in_c : input feature-map dimensions
//   stride/size/pad : pooling stride, window size, and total padding
//   input   : input tensor, laid out as [batch][channel][row][col]
//   output  : pooled maxima
//   indexes : per-output arg-max input index (consumed by the backward pass;
//             stays -1 when the whole window falls outside the input)
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
// Output spatial dimensions implied by the pooling parameters.
int h = (in_h + pad - size) / stride + 1;
int w = (in_w + pad - size) / stride + 1;
int c = in_c;
// Flatten the (possibly 2D) grid into a single linear thread id.
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
// Decompose id into (batch b, channel k, out-row i, out-col j).
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
// Window origin offset: half the padding on each side.
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
// Scan the size x size window; out-of-bounds taps contribute -INFINITY.
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
} | 85b63f09cedd4c988a5f8168a9d41166b869bdb3.cu | #include "includes.h"
// Forward max-pooling kernel: each thread produces one output element and
// records the arg-max input index for the backward pass. The linear thread
// id is decomposed as (batch, channel, out-row, out-col); window taps that
// fall outside the input contribute -INFINITY (arg-max stays -1 if the whole
// window is out of bounds).
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
    // Output spatial dimensions implied by the pooling parameters.
    int out_h = (in_h + pad - size) / stride + 1;
    int out_w = (in_w + pad - size) / stride + 1;

    int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= n) return;

    int j = id % out_w;  id /= out_w;   // output column
    int i = id % out_h;  id /= out_h;   // output row
    int k = id % in_c;   id /= in_c;    // channel
    int b = id;                         // batch index

    // Top-left corner of the pooling window in input coordinates.
    int h_start = i * stride - pad / 2;
    int w_start = j * stride - pad / 2;

    float best     = -INFINITY;
    int   best_idx = -1;
    for (int dy = 0; dy < size; ++dy) {
        for (int dx = 0; dx < size; ++dx) {
            int cur_h = h_start + dy;
            int cur_w = w_start + dx;
            int idx   = cur_w + in_w * (cur_h + in_h * (k + b * in_c));
            bool inside = cur_h >= 0 && cur_h < in_h &&
                          cur_w >= 0 && cur_w < in_w;
            float val = inside ? input[idx] : -INFINITY;
            if (val > best) {
                best     = val;
                best_idx = idx;
            }
        }
    }

    int out_index = j + out_w * (i + out_h * (k + in_c * b));
    output[out_index]  = best;
    indexes[out_index] = best_idx;
} |
0ef783926212c2cb7780bbe8857ada2ec876fd94.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
// Element-wise functor producing the effective loss weight of sample idx:
//   label == ignore_index -> 0
//   otherwise             -> weight[label] when IsWeighted, else 1.
// C_ is the number of classes; weighted labels must lie in [0, C_).
template <typename T, typename TLabel, typename TOut, bool IsWeighted>
struct OpSoftmaxCrossEntropyWeights {
OpSoftmaxCrossEntropyWeights(const TLabel* label_data, const T* weight_data, TLabel C, TLabel ignore_index)
: label_data_(label_data), weight_data_(weight_data), C_(C), ignore_index_(ignore_index) {}
__device__ __inline__ TOut operator()(CUDA_LONG idx) const {
if (label_data_[idx] != ignore_index_) {
if (IsWeighted) {
CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_);
return TOut(weight_data_[label_data_[idx]]);
}
return TOut(1.f);
}
return TOut(0.f);
}
const TLabel* label_data_;
const T* weight_data_;
TLabel C_;
TLabel ignore_index_;
};
// Fills weight_data_nd[i] for i < count with the effective loss weight of
// sample i: 0 for ignored labels, weight[label[i]] when a class-weight
// vector is provided, 1 otherwise. Launched asynchronously on `stream`.
template <typename T, typename TLabel, typename TOut>
void ComputeSoftmaxCrossEntropyWeightsImpl(hipStream_t stream, const TLabel* label, const T* weight, size_t count,
size_t label_depth, int64_t ignore_index, TOut* weight_data_nd) {
if (weight) {
OpSoftmaxCrossEntropyWeights<T, TLabel, TOut, true> op(label, weight, static_cast<TLabel>(label_depth),
static_cast<TLabel>(ignore_index));
LaunchElementwiseKernel<TOut, decltype(op)>(stream, weight_data_nd, op, count);
} else {
OpSoftmaxCrossEntropyWeights<T, TLabel, TOut, false> op(label, nullptr, static_cast<TLabel>(label_depth),
static_cast<TLabel>(ignore_index));
LaunchElementwiseKernel<TOut, decltype(op)>(stream, weight_data_nd, op, count);
}
}
#define INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(T, TLabel, TOut) \
template void ComputeSoftmaxCrossEntropyWeightsImpl(hipStream_t stream, const TLabel* label, const T* weight, \
size_t count, size_t label_depth, int64_t ignore_index, \
TOut* weight_data_nd)
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int32_t, float);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int64_t, float);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int32_t, float);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t, float);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t, half);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(BFloat16, int64_t, BFloat16);
#undef INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL
// Element-wise functor for the weighted softmax cross-entropy loss:
//   out[idx] = -log_prob[idx * C + label[idx]] * weight[idx] / *normalize_factor
// for non-ignored labels, 0 otherwise. The division is performed in TAcc
// (accumulation) precision before casting back to T.
template <typename T, typename TAcc, typename TLabel>
struct OpWeightedSoftmaxCrossEntropyLoss {
OpWeightedSoftmaxCrossEntropyLoss(const T* log_prob_data, const TLabel* label_data, const T* weight_data,
const TAcc* normalize_factor_data, TLabel C, TLabel ignore_index)
: log_prob_data_(log_prob_data),
label_data_(label_data),
weight_data_(weight_data),
normalize_factor_data_(normalize_factor_data),
C_(C),
ignore_index_(ignore_index) {}
__device__ __inline__ T operator()(CUDA_LONG idx) const {
if (label_data_[idx] != ignore_index_) {
CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_);
return static_cast<T>(static_cast<TAcc>(-log_prob_data_[idx * C_ + label_data_[idx]] * weight_data_[idx]) /
(*normalize_factor_data_));
}
return T(0.f);
}
const T* log_prob_data_;
const TLabel* label_data_;
const T* weight_data_;
const TAcc* normalize_factor_data_;
TLabel C_;
TLabel ignore_index_;
};
// Computes the per-sample weighted softmax cross-entropy loss on `stream`:
//   output_data[i] = -log_prob[i * label_depth + label[i]] * weight[i] / *normalize_factor
// for every i < count with label[i] != ignore_index, and 0 otherwise.
template <typename T, typename TAcc, typename TLabel>
void SoftmaxCrossEntropyLossImpl(hipStream_t stream, const T* log_prob, const TLabel* label, const T* weight,
                                 const TAcc* normalize_factor, size_t count, size_t label_depth, int64_t ignore_index,
                                 T* output_data) {
    using LossOp = OpWeightedSoftmaxCrossEntropyLoss<T, TAcc, TLabel>;
    LossOp loss_op(log_prob, label, weight, normalize_factor,
                   static_cast<TLabel>(label_depth),
                   static_cast<TLabel>(ignore_index));
    LaunchElementwiseKernel<T, LossOp>(stream, output_data, loss_op, count);
}
// Functor for the gradient of the weighted softmax cross-entropy loss w.r.t.
// the logits. For element idx decomposed as (row, d) = divmod(idx, C):
//   grad = dY * weight[row] * (exp(log_prob[idx]) - 1{d == label[row]}) / *normalize_factor
// where dY is per-row when IsReductionNone, a scalar otherwise; an optional
// bias term is added when HasBias. A zero normalize_factor means all sample
// weights were zero, so the gradient is defined as 0.
template <typename T, typename TAcc, typename TLabel, typename TOut, bool IsReductionNone, bool HasBias>
struct OpWeightedSoftmaxCrossEntropyLossGrad {
OpWeightedSoftmaxCrossEntropyLossGrad(const T* dY_data, const T* log_prob_data, const TLabel* label_data,
const T* weight_data, const TAcc* normalize_factor_data, const TOut* bias_data,
TLabel C)
: dY_data_(dY_data),
log_prob_data_(log_prob_data),
label_data_(label_data),
weight_data_(weight_data),
normalize_factor_data_(normalize_factor_data),
bias_data_(bias_data),
C_(C) {
C_fdm_ = fast_divmod(static_cast<int>(C));
}
__device__ __inline__ TOut operator()(CUDA_LONG idx) const {
// normalize_factor is sum of labels' weights. Because zero sum implies all weights are 0, the loss function should
// be constant 0 and its corresponding gradient should be 0 as well.
TAcc result = TAcc(0.f);
if (*normalize_factor_data_ != TAcc(0.f)) {
int row, d;
C_fdm_.divmod(idx, row, d);
CUDA_KERNEL_ASSERT(weight_data_[row] == T(0.f) || (label_data_[row] >= 0 && label_data_[row] < C_));
result = static_cast<TAcc>((IsReductionNone ? dY_data_[row] : *dY_data_) * weight_data_[row]) *
(_Exp(static_cast<TAcc>(log_prob_data_[idx])) - (TAcc)(d == label_data_[row])) /
(*normalize_factor_data_);
}
return HasBias ? static_cast<TOut>(result + static_cast<TAcc>(bias_data_[idx])) : static_cast<TOut>(result);
}
const T* dY_data_;
const T* log_prob_data_;
const TLabel* label_data_;
const T* weight_data_;
const TAcc* normalize_factor_data_;
const TOut* bias_data_;
TLabel C_;
fast_divmod C_fdm_;
};
// Launches the gradient functor over count * label_depth elements, picking
// the (reduction-none, has-bias) template specialization at runtime so the
// branches are resolved at compile time inside the kernel.
template <typename T, typename TAcc, typename TLabel, typename TOut>
void SoftmaxCrossEntropyLossGradImpl(hipStream_t stream, const T* dY, const T* log_prob, const TLabel* label,
const T* weight, const TAcc* normalize_factor, const TOut* bias_data, size_t count,
size_t label_depth, bool reduction_none, TOut* output_data) {
#define LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(is_reduction_none, has_bias) \
OpWeightedSoftmaxCrossEntropyLossGrad<T, TAcc, TLabel, TOut, is_reduction_none, has_bias> op( \
dY, log_prob, label, weight, normalize_factor, bias_data, static_cast<TLabel>(label_depth)); \
LaunchElementwiseKernel<TOut, decltype(op)>(stream, output_data, op, count * label_depth)
if (reduction_none) {
if (bias_data) {
LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, true);
} else {
LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, false);
}
} else {
if (bias_data) {
LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, true);
} else {
LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, false);
}
}
#undef LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL
}
#define INSTANTIATE_SCE_LOSS_IMPL(T, TAcc, TLabel) \
template void SoftmaxCrossEntropyLossImpl(hipStream_t stream, const T* log_prob, const TLabel* label, const T* weight, \
const TAcc* normalize_factor, size_t count, size_t label_depth, \
int64_t ignore_index, T* output_data);
INSTANTIATE_SCE_LOSS_IMPL(float, float, int32_t);
INSTANTIATE_SCE_LOSS_IMPL(float, float, int64_t);
INSTANTIATE_SCE_LOSS_IMPL(half, float, int64_t);
INSTANTIATE_SCE_LOSS_IMPL(BFloat16, float, int64_t);
#undef INSTANTIATE_SCE_LOSS_IMPL
#define INSTANTIATE_SCE_LOSS_GRAD_IMPL(T, TAcc, TLabel, TOut) \
template void SoftmaxCrossEntropyLossGradImpl(hipStream_t stream, const T* dY, const T* log_prob, const TLabel* label, \
const T* weight, const TAcc* normalize_factor, const TOut* bias_data, \
size_t count, size_t label_depth, bool reducation_none, \
TOut* output_data)
INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int32_t, float);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int32_t, half);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int64_t, float);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int64_t, half);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(half, float, int64_t, half);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(BFloat16, float, int64_t, BFloat16);
#undef INSTANTIATE_SCE_LOSS_GRAD_IMPL
} // namespace cuda
} // namespace onnxruntime
| 0ef783926212c2cb7780bbe8857ada2ec876fd94.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
// Element-wise functor producing the effective loss weight of sample idx:
//   label == ignore_index -> 0
//   otherwise             -> weight[label] when IsWeighted, else 1.
// C_ is the number of classes; weighted labels must lie in [0, C_).
template <typename T, typename TLabel, typename TOut, bool IsWeighted>
struct OpSoftmaxCrossEntropyWeights {
OpSoftmaxCrossEntropyWeights(const TLabel* label_data, const T* weight_data, TLabel C, TLabel ignore_index)
: label_data_(label_data), weight_data_(weight_data), C_(C), ignore_index_(ignore_index) {}
__device__ __inline__ TOut operator()(CUDA_LONG idx) const {
if (label_data_[idx] != ignore_index_) {
if (IsWeighted) {
CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_);
return TOut(weight_data_[label_data_[idx]]);
}
return TOut(1.f);
}
return TOut(0.f);
}
const TLabel* label_data_;
const T* weight_data_;
TLabel C_;
TLabel ignore_index_;
};
// Fills weight_data_nd[i] for i < count with the effective loss weight of
// sample i: 0 for ignored labels, weight[label[i]] when a class-weight
// vector is provided, 1 otherwise. Launched asynchronously on `stream`.
template <typename T, typename TLabel, typename TOut>
void ComputeSoftmaxCrossEntropyWeightsImpl(cudaStream_t stream, const TLabel* label, const T* weight, size_t count,
size_t label_depth, int64_t ignore_index, TOut* weight_data_nd) {
if (weight) {
OpSoftmaxCrossEntropyWeights<T, TLabel, TOut, true> op(label, weight, static_cast<TLabel>(label_depth),
static_cast<TLabel>(ignore_index));
LaunchElementwiseKernel<TOut, decltype(op)>(stream, weight_data_nd, op, count);
} else {
OpSoftmaxCrossEntropyWeights<T, TLabel, TOut, false> op(label, nullptr, static_cast<TLabel>(label_depth),
static_cast<TLabel>(ignore_index));
LaunchElementwiseKernel<TOut, decltype(op)>(stream, weight_data_nd, op, count);
}
}
#define INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(T, TLabel, TOut) \
template void ComputeSoftmaxCrossEntropyWeightsImpl(cudaStream_t stream, const TLabel* label, const T* weight, \
size_t count, size_t label_depth, int64_t ignore_index, \
TOut* weight_data_nd)
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int32_t, float);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(float, int64_t, float);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int32_t, float);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t, float);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(half, int64_t, half);
INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL(BFloat16, int64_t, BFloat16);
#undef INSTANTIATE_COMPUTE_SCE_WEIGHTS_IMPL
// Element-wise functor for the weighted softmax cross-entropy loss:
//   out[idx] = -log_prob[idx * C + label[idx]] * weight[idx] / *normalize_factor
// for non-ignored labels, 0 otherwise. The division is performed in TAcc
// (accumulation) precision before casting back to T.
template <typename T, typename TAcc, typename TLabel>
struct OpWeightedSoftmaxCrossEntropyLoss {
OpWeightedSoftmaxCrossEntropyLoss(const T* log_prob_data, const TLabel* label_data, const T* weight_data,
const TAcc* normalize_factor_data, TLabel C, TLabel ignore_index)
: log_prob_data_(log_prob_data),
label_data_(label_data),
weight_data_(weight_data),
normalize_factor_data_(normalize_factor_data),
C_(C),
ignore_index_(ignore_index) {}
__device__ __inline__ T operator()(CUDA_LONG idx) const {
if (label_data_[idx] != ignore_index_) {
CUDA_KERNEL_ASSERT(label_data_[idx] >= 0 && label_data_[idx] < C_);
return static_cast<T>(static_cast<TAcc>(-log_prob_data_[idx * C_ + label_data_[idx]] * weight_data_[idx]) /
(*normalize_factor_data_));
}
return T(0.f);
}
const T* log_prob_data_;
const TLabel* label_data_;
const T* weight_data_;
const TAcc* normalize_factor_data_;
TLabel C_;
TLabel ignore_index_;
};
// Computes the per-sample weighted softmax cross-entropy loss on `stream`:
//   output_data[i] = -log_prob[i * label_depth + label[i]] * weight[i] / *normalize_factor
// for every i < count with label[i] != ignore_index, and 0 otherwise.
template <typename T, typename TAcc, typename TLabel>
void SoftmaxCrossEntropyLossImpl(cudaStream_t stream, const T* log_prob, const TLabel* label, const T* weight,
                                 const TAcc* normalize_factor, size_t count, size_t label_depth, int64_t ignore_index,
                                 T* output_data) {
    using LossOp = OpWeightedSoftmaxCrossEntropyLoss<T, TAcc, TLabel>;
    LossOp loss_op(log_prob, label, weight, normalize_factor,
                   static_cast<TLabel>(label_depth),
                   static_cast<TLabel>(ignore_index));
    LaunchElementwiseKernel<T, LossOp>(stream, output_data, loss_op, count);
}
// Functor for the gradient of the weighted softmax cross-entropy loss w.r.t.
// the logits. For element idx decomposed as (row, d) = divmod(idx, C):
//   grad = dY * weight[row] * (exp(log_prob[idx]) - 1{d == label[row]}) / *normalize_factor
// where dY is per-row when IsReductionNone, a scalar otherwise; an optional
// bias term is added when HasBias. A zero normalize_factor means all sample
// weights were zero, so the gradient is defined as 0.
template <typename T, typename TAcc, typename TLabel, typename TOut, bool IsReductionNone, bool HasBias>
struct OpWeightedSoftmaxCrossEntropyLossGrad {
OpWeightedSoftmaxCrossEntropyLossGrad(const T* dY_data, const T* log_prob_data, const TLabel* label_data,
const T* weight_data, const TAcc* normalize_factor_data, const TOut* bias_data,
TLabel C)
: dY_data_(dY_data),
log_prob_data_(log_prob_data),
label_data_(label_data),
weight_data_(weight_data),
normalize_factor_data_(normalize_factor_data),
bias_data_(bias_data),
C_(C) {
C_fdm_ = fast_divmod(static_cast<int>(C));
}
__device__ __inline__ TOut operator()(CUDA_LONG idx) const {
// normalize_factor is sum of labels' weights. Because zero sum implies all weights are 0, the loss function should
// be constant 0 and its corresponding gradient should be 0 as well.
TAcc result = TAcc(0.f);
if (*normalize_factor_data_ != TAcc(0.f)) {
int row, d;
C_fdm_.divmod(idx, row, d);
CUDA_KERNEL_ASSERT(weight_data_[row] == T(0.f) || (label_data_[row] >= 0 && label_data_[row] < C_));
result = static_cast<TAcc>((IsReductionNone ? dY_data_[row] : *dY_data_) * weight_data_[row]) *
(_Exp(static_cast<TAcc>(log_prob_data_[idx])) - (TAcc)(d == label_data_[row])) /
(*normalize_factor_data_);
}
return HasBias ? static_cast<TOut>(result + static_cast<TAcc>(bias_data_[idx])) : static_cast<TOut>(result);
}
const T* dY_data_;
const T* log_prob_data_;
const TLabel* label_data_;
const T* weight_data_;
const TAcc* normalize_factor_data_;
const TOut* bias_data_;
TLabel C_;
fast_divmod C_fdm_;
};
// Launches the gradient functor over count * label_depth elements, picking
// the (reduction-none, has-bias) template specialization at runtime so the
// branches are resolved at compile time inside the kernel.
template <typename T, typename TAcc, typename TLabel, typename TOut>
void SoftmaxCrossEntropyLossGradImpl(cudaStream_t stream, const T* dY, const T* log_prob, const TLabel* label,
const T* weight, const TAcc* normalize_factor, const TOut* bias_data, size_t count,
size_t label_depth, bool reduction_none, TOut* output_data) {
#define LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(is_reduction_none, has_bias) \
OpWeightedSoftmaxCrossEntropyLossGrad<T, TAcc, TLabel, TOut, is_reduction_none, has_bias> op( \
dY, log_prob, label, weight, normalize_factor, bias_data, static_cast<TLabel>(label_depth)); \
LaunchElementwiseKernel<TOut, decltype(op)>(stream, output_data, op, count * label_depth)
if (reduction_none) {
if (bias_data) {
LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, true);
} else {
LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(true, false);
}
} else {
if (bias_data) {
LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, true);
} else {
LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL(false, false);
}
}
#undef LAUNCH_WEIGHTED_SOFTMAX_CROSS_ENTROPY_LOSS_GRAD_KERNEL
}
#define INSTANTIATE_SCE_LOSS_IMPL(T, TAcc, TLabel) \
template void SoftmaxCrossEntropyLossImpl(cudaStream_t stream, const T* log_prob, const TLabel* label, const T* weight, \
const TAcc* normalize_factor, size_t count, size_t label_depth, \
int64_t ignore_index, T* output_data);
INSTANTIATE_SCE_LOSS_IMPL(float, float, int32_t);
INSTANTIATE_SCE_LOSS_IMPL(float, float, int64_t);
INSTANTIATE_SCE_LOSS_IMPL(half, float, int64_t);
INSTANTIATE_SCE_LOSS_IMPL(BFloat16, float, int64_t);
#undef INSTANTIATE_SCE_LOSS_IMPL
#define INSTANTIATE_SCE_LOSS_GRAD_IMPL(T, TAcc, TLabel, TOut) \
template void SoftmaxCrossEntropyLossGradImpl(cudaStream_t stream, const T* dY, const T* log_prob, const TLabel* label, \
const T* weight, const TAcc* normalize_factor, const TOut* bias_data, \
size_t count, size_t label_depth, bool reducation_none, \
TOut* output_data)
INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int32_t, float);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int32_t, half);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int64_t, float);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(float, float, int64_t, half);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(half, float, int64_t, half);
INSTANTIATE_SCE_LOSS_GRAD_IMPL(BFloat16, float, int64_t, BFloat16);
#undef INSTANTIATE_SCE_LOSS_GRAD_IMPL
} // namespace cuda
} // namespace onnxruntime
|
fc50025f375e15af89eb1fdd819ba14f7166f99f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// 256-bin histogram of the 8-bit image `input` (width*height pixels),
// rendered into `hist` as 256 float triples:
//   hist[b*3 + 0] = x position of bin b mapped to [-width/2, width/2)
//   hist[b*3 + 1] += (bin fraction - 0.48/gridDim.x) * height  (atomic)
//   hist[b*3 + 2] is never written here — presumably filled elsewhere.
// Per-block counts are accumulated in shared memory first. Each thread
// initializes/publishes 8 bins, so the kernel requires blockDim.x == 32.
__global__ void histogramm(float* hist, unsigned char* input, int width, int height, int stride)
{
int index = blockIdx.x * blockDim.x * stride + threadIdx.x;
int size = width * height;
// NOTE(review): this per-thread early return happens before the
// __syncthreads() barriers below, which must be reached by all threads of
// the block — undefined behaviour if a block is only partially in range.
// It also drops that block's `factor / gridDim.x` offset contribution.
// Verify the launch configuration keeps whole blocks in range.
if (index > size - 1)
return;
__shared__ unsigned int histo_private[256];
#pragma unroll
for (int i = 0; i < 8; i++)
{
histo_private[threadIdx.x * 8 + i] = 0;
}
__syncthreads();
// Each thread accumulates `stride` pixels spaced blockDim.x apart.
int i = 0;
while (i < stride && index < size)
{
int pixel = input[index];
atomicAdd(&(histo_private[pixel]), 1);
index += blockDim.x;
i++;
}
__syncthreads();
// Publish this block's contribution to the global histogram.
#pragma unroll
for (int i = 0; i < 8; i++)
{
int x_off = threadIdx.x * 8 + i;
hist[x_off * 3 + 0] = (x_off - 128.f) / 256.f * (float)width;
float factor = .48f;
float scaledValue = ((float)(histo_private[x_off]) / (float)size) - (factor / gridDim.x);
atomicAdd(&(hist[x_off * 3 + 1]), scaledValue * (float)height);
}
} | fc50025f375e15af89eb1fdd819ba14f7166f99f.cu | #include "includes.h"
// 256-bin histogram of the 8-bit image `input` (width*height pixels),
// rendered into `hist` as 256 float triples:
//   hist[b*3 + 0] = x position of bin b mapped to [-width/2, width/2)
//   hist[b*3 + 1] += (bin fraction - 0.48/gridDim.x) * height  (atomic)
//   hist[b*3 + 2] is never written here — presumably filled elsewhere.
// Per-block counts are accumulated in shared memory first. Each thread
// initializes/publishes 8 bins, so the kernel requires blockDim.x == 32.
__global__ void histogramm(float* hist, unsigned char* input, int width, int height, int stride)
{
    int index = blockIdx.x * blockDim.x * stride + threadIdx.x;
    int size = width * height;
    // Fix: the former per-thread early return for out-of-range indices was
    // removed. It skipped the __syncthreads() barriers below for part of a
    // block (undefined behaviour) and dropped that block's share of the
    // constant `factor / gridDim.x` offset subtracted from every bin. The
    // accumulation loop condition already guards out-of-range pixels.
    __shared__ unsigned int histo_private[256];
#pragma unroll
    for (int i = 0; i < 8; i++)
    {
        histo_private[threadIdx.x * 8 + i] = 0;
    }
    __syncthreads();
    // Each thread accumulates `stride` pixels spaced blockDim.x apart.
    int i = 0;
    while (i < stride && index < size)
    {
        int pixel = input[index];
        atomicAdd(&(histo_private[pixel]), 1);
        index += blockDim.x;
        i++;
    }
    __syncthreads();
    // Publish this block's contribution to the global histogram.
#pragma unroll
    for (int i = 0; i < 8; i++)
    {
        int x_off = threadIdx.x * 8 + i;
        hist[x_off * 3 + 0] = (x_off - 128.f) / 256.f * (float)width;
        float factor = .48f;
        float scaledValue = ((float)(histo_private[x_off]) / (float)size) - (factor / gridDim.x);
        atomicAdd(&(hist[x_off * 3 + 1]), scaledValue * (float)height);
    }
} |
3c342f02dfd06826206bee5359337a3db26051c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <hiprand/hiprand.h>
#include <hip/hip_runtime.h>
#define N 1024
// Child kernel: computes the dot product c[row][column] = a[row,:] . b[:,column].
// Must be launched as a single block of exactly N threads (thread k handles
// element k of the row/column); every thread redundantly stores the result.
// NOTE(review): the single shared accumulator serializes all N atomicAdds —
// extremely slow, but correct as written.
__global__
void add(int row, int column, float *a, float *b, float *c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float sum;
// All threads store 0 into the shared accumulator (same value, benign),
// then meet at the barrier before accumulating.
sum = 0;
__syncthreads();
atomicAdd(&sum, a[index + row * N] * b[index * N + column]);
__syncthreads();
c[row*N + column] = sum;
}
// Parent kernel: one thread per output element (row, column). Each in-range
// thread launches the `add` child kernel via dynamic parallelism to compute
// its dot product, then waits for it to complete.
// NOTE(review): a device-side hipDeviceSynchronize() in every thread is very
// expensive and deprecated on recent toolkits — confirm this is intentional.
__global__
void multiply(float *a, float* b, float* c) {
int column = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if((row) < N && (column) < N){
hipLaunchKernelGGL(( add), dim3(1),dim3(N), 0, 0, row, column, a, b ,c);
hipDeviceSynchronize();
}
}
// Returns the wall-clock difference end - start in milliseconds.
static double elapsed_ms(const struct timeval& start, const struct timeval& end) {
    double mtime = (double)(end.tv_usec - start.tv_usec);
    mtime /= 1000;
    mtime += (end.tv_sec - start.tv_sec) * 1000;
    return mtime;
}

// Multiplies two NxN matrices of uniform random floats on the GPU (via the
// dynamic-parallelism `multiply` kernel) and on the CPU, printing the wall
// time of each pass. Note: the two results are never compared; the CPU pass
// serves only as a timing reference. Fixes vs. the previous version: the
// unused `float ms` local was removed, and the RNG/device allocations are
// now released before exit (previously leaked).
int main() {
    struct timeval startc, end;
    double mtime;

    // Managed allocations are visible to both host (CPU reference pass)
    // and device.
    float *a, *b, *c;
    hipMallocManaged((void **)&a, N*N*sizeof(float));
    hipMallocManaged((void **)&b, N*N*sizeof(float));
    hipMallocManaged((void **)&c, N*N*sizeof(float));

    // Fill the inputs with uniform random values; fixed seed for
    // reproducibility.
    hiprandGenerator_t gen;
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
    hiprandSetPseudoRandomGeneratorSeed(gen, 1234UL);
    hiprandGenerateUniform(gen, a, N*N);
    hiprandGenerateUniform(gen, b, N*N);
    hipDeviceSynchronize();

    // One thread per output element; fall back to 32x32 tiles when a
    // single block cannot cover the whole matrix.
    dim3 threadsPerBlock(N,N);
    dim3 blocksPerGrid(1, 1);
    if(N*N > 1024){
        threadsPerBlock.x = 32;
        threadsPerBlock.y = 32;
        blocksPerGrid.x = ceil((double)N/(double)32);
        blocksPerGrid.y = ceil((double)N/(double)32);
        printf("%d - %d , %d - %d\n", blocksPerGrid.x, blocksPerGrid.y, threadsPerBlock.x, threadsPerBlock.y);
    }

    // --- GPU pass ---
    gettimeofday(&startc, NULL);
    hipLaunchKernelGGL(( multiply), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, a, b, c);
    hipDeviceSynchronize();
    gettimeofday(&end, NULL);
    mtime = elapsed_ms(startc, end);
    printf("\nGPU Time: %g\n", mtime);

    // --- CPU reference pass ---
    float *hostC = (float *)malloc(N*N*(sizeof(float)));
    gettimeofday(&startc, NULL);
    for(int i = 0; i< N; i++){
        for(int j = 0; j < N; j++){
            hostC[i*N+j] = 0;
            for(int k = 0; k < N; k++){
                hostC[i*N+j] += a[i*N+k] * b[k*N+j];
            }
        }
    }
    gettimeofday(&end, NULL);
    free(hostC);
    mtime = elapsed_ms(startc, end);
    printf("CPU Time: %g\n", mtime);

    // Release GPU resources (previously leaked).
    hiprandDestroyGenerator(gen);
    hipFree(a);
    hipFree(b);
    hipFree(c);
    return 0;
}
| 3c342f02dfd06826206bee5359337a3db26051c5.cu | #include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <curand.h>
#include <cuda.h>
#define N 1024
// Child kernel: computes the dot product c[row][column] = a[row,:] . b[:,column].
// Must be launched as a single block of exactly N threads (thread k handles
// element k of the row/column); every thread redundantly stores the result.
// NOTE(review): the single shared accumulator serializes all N atomicAdds —
// extremely slow, but correct as written.
__global__
void add(int row, int column, float *a, float *b, float *c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float sum;
// All threads store 0 into the shared accumulator (same value, benign),
// then meet at the barrier before accumulating.
sum = 0;
__syncthreads();
atomicAdd(&sum, a[index + row * N] * b[index * N + column]);
__syncthreads();
c[row*N + column] = sum;
}
// Parent kernel: one thread per output element (row, column). Each in-range
// thread launches the `add` child kernel via dynamic parallelism to compute
// its dot product, then waits for it to complete.
// NOTE(review): a device-side cudaDeviceSynchronize() in every thread is very
// expensive and deprecated since CUDA 11.6 — confirm this is intentional.
__global__
void multiply(float *a, float* b, float* c) {
int column = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if((row) < N && (column) < N){
add<<<1,N>>>(row, column, a, b ,c);
cudaDeviceSynchronize();
}
}
// Returns the wall-clock difference end - start in milliseconds.
static double elapsed_ms(const struct timeval& start, const struct timeval& end) {
    double mtime = (double)(end.tv_usec - start.tv_usec);
    mtime /= 1000;
    mtime += (end.tv_sec - start.tv_sec) * 1000;
    return mtime;
}

// Multiplies two NxN matrices of uniform random floats on the GPU (via the
// dynamic-parallelism `multiply` kernel) and on the CPU, printing the wall
// time of each pass. Note: the two results are never compared; the CPU pass
// serves only as a timing reference. Fixes vs. the previous version: the
// unused `float ms` local was removed, and the RNG/device allocations are
// now released before exit (previously leaked).
int main() {
    struct timeval startc, end;
    double mtime;

    // Managed allocations are visible to both host (CPU reference pass)
    // and device.
    float *a, *b, *c;
    cudaMallocManaged((void **)&a, N*N*sizeof(float));
    cudaMallocManaged((void **)&b, N*N*sizeof(float));
    cudaMallocManaged((void **)&c, N*N*sizeof(float));

    // Fill the inputs with uniform random values; fixed seed for
    // reproducibility.
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234UL);
    curandGenerateUniform(gen, a, N*N);
    curandGenerateUniform(gen, b, N*N);
    cudaDeviceSynchronize();

    // One thread per output element; fall back to 32x32 tiles when a
    // single block cannot cover the whole matrix.
    dim3 threadsPerBlock(N,N);
    dim3 blocksPerGrid(1, 1);
    if(N*N > 1024){
        threadsPerBlock.x = 32;
        threadsPerBlock.y = 32;
        blocksPerGrid.x = ceil((double)N/(double)32);
        blocksPerGrid.y = ceil((double)N/(double)32);
        printf("%d - %d , %d - %d\n", blocksPerGrid.x, blocksPerGrid.y, threadsPerBlock.x, threadsPerBlock.y);
    }

    // --- GPU pass ---
    gettimeofday(&startc, NULL);
    multiply<<<blocksPerGrid, threadsPerBlock>>>(a, b, c);
    cudaDeviceSynchronize();
    gettimeofday(&end, NULL);
    mtime = elapsed_ms(startc, end);
    printf("\nGPU Time: %g\n", mtime);

    // --- CPU reference pass ---
    float *hostC = (float *)malloc(N*N*(sizeof(float)));
    gettimeofday(&startc, NULL);
    for(int i = 0; i< N; i++){
        for(int j = 0; j < N; j++){
            hostC[i*N+j] = 0;
            for(int k = 0; k < N; k++){
                hostC[i*N+j] += a[i*N+k] * b[k*N+j];
            }
        }
    }
    gettimeofday(&end, NULL);
    free(hostC);
    mtime = elapsed_ms(startc, end);
    printf("CPU Time: %g\n", mtime);

    // Release GPU resources (previously leaked).
    curandDestroyGenerator(gen);
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
}
|
8b81a215260621d390dd5b5c64155eba8455558f.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define PRECISION_z
#include "commonblas.h"
//
// m, n - dimensions in the output (ha) matrix.
// This routine copies the dat matrix from the GPU
// to ha on the CPU. In addition, the output matrix
// is transposed. The routine uses a buffer of size
// 2*lddb*nb pointed to by dB (lddb > m) on the GPU.
// Note that lda >= m and lddat >= n.
//
/* Copy the n x m matrix dat from the GPU into the m x n host matrix ha,
   transposing it on the way.  dB is a GPU workspace of 2*lddb*nb elements
   (lddb > m) used as a double buffer: each nb-wide tile of dat is
   transposed into one half of dB while the previous tile drains to the
   host from the other half, on alternating streams.
   NOTE(review): the transpose and the async download of a tile are both
   issued on stream j%2, so the copy is presumably ordered after the
   transpose by stream semantics -- confirm against magmablas_ztranspose2s. */
extern "C" void
magmablas_zgetmatrix_transpose( magma_int_t m, magma_int_t n,
const hipDoubleComplex *dat, magma_int_t ldda,
hipDoubleComplex *ha, magma_int_t lda,
hipDoubleComplex *dB, magma_int_t lddb, magma_int_t nb )
{
magma_int_t i = 0, j = 0, ib;
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
/* Leading dimensions must cover the requested shape: lda rows of ha,
   ldda rows of dat (which holds n rows), lddb rows of the workspace. */
if (lda < m || ldda < n || lddb < m){
printf("Wrong arguments in zgetmatrix_transpose.\n");
return;
}
hipStream_t stream[2];
magma_queue_create( &stream[0] );
magma_queue_create( &stream[1] );
for(i=0; i<n; i+=nb){
/* Move data from GPU to CPU using 2 buffers; 1st transpose the data on the GPU */
ib = min(n-i, nb); /* width of this tile; the last tile may be narrower */
//magmablas_ztranspose2 ( dB + (j%2)*nb*lddb, lddb, dat+i, ldda, ib, m);
magmablas_ztranspose2s( dB + (j%2)*nb*lddb, lddb, dat+i, ldda, ib, m, &stream[j%2]);
magma_zgetmatrix_async( m, ib,
dB + (j%2) * nb * lddb, lddb,
ha+i*lda, lda, stream[j%2] );
j++; /* j selects the buffer half / stream for the next tile */
}
magma_queue_destroy( stream[0] );
magma_queue_destroy( stream[1] );
}
//===========================================================================
// This version is similar to the above but for multiGPUs. The distribution
// is 1D block cyclic. The input arrays are pointers for the corresponding
// GPUs. The streams are passed as argument, in contrast to the single GPU
// routine.
// NOTE: see magmablas_zgetmatrix_transpose_mgpu.
//===========================================================================
/* Multi-GPU variant: dat[k] holds GPU k's share of a 1D block-cyclic
   (block width nb) distribution; each tile is transposed into GPU k's
   double buffer dB[k] and downloaded into the correct column slice of ha.
   Streams are supplied by the caller (two per GPU), unlike the single-GPU
   routine which creates its own.
   NOTE(review): j[4] hard-codes at most 4 GPUs -- j[k] indexes out of
   bounds if num_gpus > 4; confirm callers never exceed that.
   NOTE(review): unlike the single-GPU routine, ldda is not validated here
   (it is a per-GPU array). */
extern "C" void
magmablas_zgetmatrix_transpose2( magma_int_t m, magma_int_t n,
const hipDoubleComplex **dat, magma_int_t *ldda,
hipDoubleComplex *ha, magma_int_t lda,
hipDoubleComplex **dB, magma_int_t lddb, magma_int_t nb,
magma_int_t num_gpus, hipStream_t stream[][2] )
{
magma_int_t i = 0, j[4] = {0, 0, 0, 0}, ib, k;
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
if (lda < m || lddb < m){
printf("Wrong arguments in zgetmatrix_transpose2.\n");
return;
}
for(i=0; i<n; i+=nb){
/* Move data from GPU to CPU using 2 buffers; 1st transpose the data on the GPU */
k = (i/nb)%num_gpus; /* owner GPU of this tile (block-cyclic) */
ib = min(n-i, nb); /* tile width; last tile may be narrower */
magma_setdevice(k);
//magma_queue_sync( stream[k][j[k]%2] );
//magmablas_ztranspose2( dB[k] + (j[k]%2)*nb*lddb, lddb,
// dat[k]+i/(nb*num_gpus)*nb, ldda[k], ib, m);
magmablas_ztranspose2s(dB[k] + (j[k]%2)*nb*lddb, lddb,
dat[k]+i/(nb*num_gpus)*nb, ldda[k],
ib, m, &stream[k][j[k]%2]);
magma_zgetmatrix_async( m, ib,
dB[k] + (j[k]%2) * nb * lddb, lddb,
ha+i*lda, lda, stream[k][j[k]%2] );
j[k]++; /* per-GPU tile counter selects that GPU's buffer half / stream */
}
}
| 8b81a215260621d390dd5b5c64155eba8455558f.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define PRECISION_z
#include "commonblas.h"
//
// m, n - dimensions in the output (ha) matrix.
// This routine copies the dat matrix from the GPU
// to ha on the CPU. In addition, the output matrix
// is transposed. The routine uses a buffer of size
// 2*lddb*nb pointed to by dB (lddb > m) on the GPU.
// Note that lda >= m and lddat >= n.
//
/* Download the n x m GPU matrix dat into the m x n host matrix ha,
   transposed.  dB is a 2*lddb*nb GPU workspace (lddb > m) used as a
   double buffer so the transpose of one nb-wide tile overlaps the host
   download of the previous one, on alternating streams. */
extern "C" void
magmablas_zgetmatrix_transpose( magma_int_t m, magma_int_t n,
                                const cuDoubleComplex *dat, magma_int_t ldda,
                                cuDoubleComplex *ha, magma_int_t lda,
                                cuDoubleComplex *dB, magma_int_t lddb, magma_int_t nb )
{
    /* Nothing to do for an empty matrix. */
    if ( m == 0 || n == 0 )
        return;

    /* Leading dimensions must be large enough for the requested shape. */
    if ( lda < m || ldda < n || lddb < m ) {
        printf("Wrong arguments in zgetmatrix_transpose.\n");
        return;
    }

    cudaStream_t stream[2];
    magma_queue_create( &stream[0] );
    magma_queue_create( &stream[1] );

    /* Walk the columns of dat in tiles of width nb, ping-ponging between
       the two halves of dB and the two streams. */
    magma_int_t tile = 0;
    for ( magma_int_t col = 0; col < n; col += nb, ++tile ) {
        magma_int_t width = min( n - col, nb );
        cuDoubleComplex *buf = dB + (tile % 2) * nb * lddb;

        magmablas_ztranspose2s( buf, lddb, dat + col, ldda,
                                width, m, &stream[tile % 2] );
        magma_zgetmatrix_async( m, width,
                                buf, lddb,
                                ha + col * lda, lda, stream[tile % 2] );
    }

    magma_queue_destroy( stream[0] );
    magma_queue_destroy( stream[1] );
}
//===========================================================================
// This version is similar to the above but for multiGPUs. The distribution
// is 1D block cyclic. The input arrays are pointers for the corresponding
// GPUs. The streams are passed as argument, in contrast to the single GPU
// routine.
// NOTE: see magmablas_zgetmatrix_transpose_mgpu.
//===========================================================================
/* Multi-GPU variant: dat[k] holds GPU k's share of a 1D block-cyclic
   (block width nb) distribution; each tile is transposed into GPU k's
   double buffer dB[k] and downloaded into the correct column slice of ha.
   Streams are supplied by the caller (two per GPU), unlike the single-GPU
   routine which creates its own.
   NOTE(review): j[4] hard-codes at most 4 GPUs -- j[k] indexes out of
   bounds if num_gpus > 4; confirm callers never exceed that.
   NOTE(review): unlike the single-GPU routine, ldda is not validated here
   (it is a per-GPU array). */
extern "C" void
magmablas_zgetmatrix_transpose2( magma_int_t m, magma_int_t n,
                                 const cuDoubleComplex **dat, magma_int_t *ldda,
                                 cuDoubleComplex *ha,  magma_int_t lda,
                                 cuDoubleComplex **dB, magma_int_t lddb, magma_int_t nb,
                                 magma_int_t num_gpus, cudaStream_t stream[][2] )
{
    magma_int_t i = 0, j[4] = {0, 0, 0, 0}, ib, k;

    /* Quick return */
    if ( (m == 0) || (n == 0) )
        return;

    if (lda < m || lddb < m){
        printf("Wrong arguments in zgetmatrix_transpose2.\n");
        return;
    }

    for(i=0; i<n; i+=nb){
       /* Move data from GPU to CPU using 2 buffers; 1st transpose the data on the GPU */
       k = (i/nb)%num_gpus; /* owner GPU of this tile (block-cyclic) */
       ib = min(n-i, nb); /* tile width; last tile may be narrower */

       magma_setdevice(k);
       //magma_queue_sync( stream[k][j[k]%2] );
       //magmablas_ztranspose2( dB[k] + (j[k]%2)*nb*lddb, lddb,
       //                       dat[k]+i/(nb*num_gpus)*nb, ldda[k], ib, m);
       magmablas_ztranspose2s(dB[k] + (j[k]%2)*nb*lddb, lddb,
                              dat[k]+i/(nb*num_gpus)*nb, ldda[k],
                              ib, m, &stream[k][j[k]%2]);
       magma_zgetmatrix_async( m, ib,
                               dB[k] + (j[k]%2) * nb * lddb, lddb,
                               ha+i*lda, lda, stream[k][j[k]%2] );
       j[k]++; /* per-GPU tile counter selects that GPU's buffer half / stream */
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.