hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
59ab142aaff0d716c5ae014a1960281c978bd447.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
 * Element-wise matrix addition: C = A + B for a width x height row-major
 * matrix. Expects a 2D launch whose grid covers at least (width, height)
 * threads; out-of-range threads do nothing.
 */
__global__ void matAddKernel(float* A, float* B, float* C, int width, int height){
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    /* Guard both dimensions independently. The old `i < width*height` check
       let threads with col >= width alias elements of the next row, causing
       redundant (and technically racy, though same-valued) writes. */
    if (col < width && row < height) {
        int i = row * width + col;
        C[i] = A[i] + B[i];
    }
}
/*
 * Computes C = A + B on the GPU for width x height row-major float
 * matrices, then prints the top-left 4x4 corner of A, B and C.
 *
 * Fix: the grid was hard-coded to dim3(5, 4), which with 16x16 blocks only
 * covers matrices up to 80x64 elements; it is now derived from the actual
 * width/height (for the 76x62 matrices used by main() this yields the same
 * 5x4 grid as before, so existing behavior is unchanged).
 */
void matAdd(float* A, float* B, float* C, int width, int height)
{
    size_t size = (size_t)width * height * sizeof(float);
    float *d_A, *d_B, *d_C;
    hipMalloc((void **) &d_A, size);
    hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
    hipMalloc((void **) &d_B, size);
    hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
    hipMalloc((void **) &d_C, size);
    /* 16x16 thread tiles; ceil-divide so partial tiles at the edges are
       still covered (the kernel bounds-checks the overshoot). */
    dim3 dimBlock(16, 16, 1);
    dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
                 (height + dimBlock.y - 1) / dimBlock.y, 1);
    hipLaunchKernelGGL(( matAddKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, width, height);
    hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
    /* Print the top-left 4x4 corner of each matrix. */
    printf("\nA: \n");
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++){
            printf("%2.0f ", A[i + j*width]);
        }
        printf("\n");
    }
    printf("\nB: \n");
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++){
            printf("%2.0f ", B[i + j*width]);
        }
        printf("\n");
    }
    printf("\n-------------------------------------");
    printf("\nC: \n");
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++){
            printf("%2.0f ", C[i + j*width]);
        }
        printf("\n");
    }
    printf("\n-------------------------------------\n");
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}
int main() {
int width = 76;
int height = 62;
static float h_A[76*62];
static float h_B[76*62];
static float h_C[76*62];
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
h_A[i + j*width] = (i+j)%2;
h_B[i + j*width] = (i+j)%3;
}
}
matAdd(h_A, h_B, h_C, width, height);
} | 59ab142aaff0d716c5ae014a1960281c978bd447.cu | #include <cuda.h>
#include <stdio.h>
/*
 * Element-wise matrix addition: C = A + B for a width x height row-major
 * matrix. Expects a 2D launch whose grid covers at least (width, height)
 * threads; out-of-range threads do nothing.
 */
__global__ void matAddKernel(float* A, float* B, float* C, int width, int height){
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    /* Guard both dimensions independently. The old `i < width*height` check
       let threads with col >= width alias elements of the next row, causing
       redundant (and technically racy, though same-valued) writes. */
    if (col < width && row < height) {
        int i = row * width + col;
        C[i] = A[i] + B[i];
    }
}
/*
 * Computes C = A + B on the GPU for width x height row-major float
 * matrices, then prints the top-left 4x4 corner of A, B and C.
 *
 * Fix: the grid was hard-coded to dim3(5, 4), which with 16x16 blocks only
 * covers matrices up to 80x64 elements; it is now derived from the actual
 * width/height (for the 76x62 matrices used by main() this yields the same
 * 5x4 grid as before, so existing behavior is unchanged).
 */
void matAdd(float* A, float* B, float* C, int width, int height)
{
    size_t size = (size_t)width * height * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_B, size);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_C, size);
    /* 16x16 thread tiles; ceil-divide so partial tiles at the edges are
       still covered (the kernel bounds-checks the overshoot). */
    dim3 dimBlock(16, 16, 1);
    dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
                 (height + dimBlock.y - 1) / dimBlock.y, 1);
    matAddKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, width, height);
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    /* Print the top-left 4x4 corner of each matrix. */
    printf("\nA: \n");
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++){
            printf("%2.0f ", A[i + j*width]);
        }
        printf("\n");
    }
    printf("\nB: \n");
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++){
            printf("%2.0f ", B[i + j*width]);
        }
        printf("\n");
    }
    printf("\n-------------------------------------");
    printf("\nC: \n");
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++){
            printf("%2.0f ", C[i + j*width]);
        }
        printf("\n");
    }
    printf("\n-------------------------------------\n");
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
int main() {
int width = 76;
int height = 62;
static float h_A[76*62];
static float h_B[76*62];
static float h_C[76*62];
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
h_A[i + j*width] = (i+j)%2;
h_B[i + j*width] = (i+j)%3;
}
}
matAdd(h_A, h_B, h_C, width, height);
} |
a0a2c0185a4052eee3a4494939731824a56ab846.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_badd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nrows = 1;
int ncols = 1;
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
_badd), dim3(gridBlock),dim3(threadBlock), 0, 0, nrows,ncols,y,b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
_badd), dim3(gridBlock),dim3(threadBlock), 0, 0, nrows,ncols,y,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
_badd), dim3(gridBlock),dim3(threadBlock), 0, 0, nrows,ncols,y,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a0a2c0185a4052eee3a4494939731824a56ab846.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_badd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nrows = 1;
int ncols = 1;
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_badd<<<gridBlock,threadBlock>>>(nrows,ncols,y,b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_badd<<<gridBlock,threadBlock>>>(nrows,ncols,y,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_badd<<<gridBlock,threadBlock>>>(nrows,ncols,y,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e262b706de38f0fe78f957ad6e6739589ac79fb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <stdio.h>
#include <stdlib.h>
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
/* SAXPY, single precision: y[i] = a*x[i] + y[i].
 * Fix: index with blockIdx as well as threadIdx so the kernel is correct
 * for multi-block launches. With a single block (blockIdx.x == 0) this is
 * identical to the old threadIdx.x-only indexing, so existing callers see
 * no change. */
__global__
void saxpy (int n, float a, float *x, float *y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    /* Bounds guard: the launch may provide more threads than elements
       when n is not a multiple of the block size. */
    if (i < n) {
        y[i] = a*x[i] + y[i];
    }
}
/*
 * SAXPY driver: builds x[i]=i and y[i]=10*i on the host, runs
 * y = 5*x + y on the GPU, and prints two sample results.
 * Optional argv[1] overrides the vector length N.
 */
int main(int argc, char **argv) {
    int i;
    float *x, *y, *dev_x, *dev_y;
    float a;
    long long N=(1000*1000*8);
    if (argc>1) {
        N=atoll(argv[1]);
    }
    /* Allocate vectors on CPU */
    x=(float *)malloc(N*sizeof(float));
    y=(float *)malloc(N*sizeof(float));
    /* Allocate vectors on GPU */
    hipMalloc((void **)&dev_x,N*sizeof(float));
    hipMalloc((void **)&dev_y,N*sizeof(float));
    /* Initialize the host vectors. Use a long long counter so lengths
       beyond INT_MAX do not overflow the loop variable. */
    for(long long k=0;k<N;k++) {
        x[k]=(float)k;
        y[k]=(float)(10.0*k);
    }
    hipMemcpy(dev_x,x,N*sizeof(float),hipMemcpyHostToDevice);
    hipMemcpy(dev_y,y,N*sizeof(float),hipMemcpyHostToDevice);
    /* Fixed format-string bug: (N+255)/256 is a long long, so it must be
       printed with %lld, not %d (undefined behavior otherwise). */
    printf("Size: %lld\n",(N+255)/256);
    a=5.0;
    /* Perform SAXPY.
       NOTE(review): a single block of N threads exceeds the per-block
       thread limit for N > 1024, so the default N = 8M always fails the
       launch (caught and reported below). A proper fix needs a multi-block
       launch AND block-aware indexing inside saxpy — change them together.
       Also note saxpy takes `int n`, truncating N > INT_MAX. */
    hipLaunchKernelGGL(( saxpy), dim3(1),dim3(N), 0, 0, N,a,dev_x,dev_y);
    // make the host block until the device is finished
    hipDeviceSynchronize();
    // check for error
    hipError_t error = hipGetLastError();
    if (error != hipSuccess) {
        printf("CUDA error: %s\n", hipGetErrorString(error));
        exit(-1);
    }
    hipMemcpy(y,dev_y,N*sizeof(float),hipMemcpyDeviceToHost);
    /* Sample results: y[i] = a*x[i] + y[i] = 5*i + 10*i = 15*i */
    i=100;
    printf("y[%d]=%f, y[%lld]=%f\n",i,y[i],N-1,y[N-1]);
    hipFree(dev_x);
    hipFree(dev_y);
    /* Release host buffers too (previously leaked). */
    free(x);
    free(y);
    return 0;
}
| e262b706de38f0fe78f957ad6e6739589ac79fb8.cu | /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <stdio.h>
#include <stdlib.h>
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
/* SAXPY, single precision: y[i] = a*x[i] + y[i].
 * Fix: index with blockIdx as well as threadIdx so the kernel is correct
 * for multi-block launches. With a single block (blockIdx.x == 0) this is
 * identical to the old threadIdx.x-only indexing, so existing callers see
 * no change. */
__global__
void saxpy (int n, float a, float *x, float *y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    /* Bounds guard: the launch may provide more threads than elements
       when n is not a multiple of the block size. */
    if (i < n) {
        y[i] = a*x[i] + y[i];
    }
}
/*
 * SAXPY driver: builds x[i]=i and y[i]=10*i on the host, runs
 * y = 5*x + y on the GPU, and prints two sample results.
 * Optional argv[1] overrides the vector length N.
 */
int main(int argc, char **argv) {
    int i;
    float *x, *y, *dev_x, *dev_y;
    float a;
    long long N=(1000*1000*8);
    if (argc>1) {
        N=atoll(argv[1]);
    }
    /* Allocate vectors on CPU */
    x=(float *)malloc(N*sizeof(float));
    y=(float *)malloc(N*sizeof(float));
    /* Allocate vectors on GPU */
    cudaMalloc((void **)&dev_x,N*sizeof(float));
    cudaMalloc((void **)&dev_y,N*sizeof(float));
    /* Initialize the host vectors. Use a long long counter so lengths
       beyond INT_MAX do not overflow the loop variable. */
    for(long long k=0;k<N;k++) {
        x[k]=(float)k;
        y[k]=(float)(10.0*k);
    }
    cudaMemcpy(dev_x,x,N*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_y,y,N*sizeof(float),cudaMemcpyHostToDevice);
    /* Fixed format-string bug: (N+255)/256 is a long long, so it must be
       printed with %lld, not %d (undefined behavior otherwise). */
    printf("Size: %lld\n",(N+255)/256);
    a=5.0;
    /* Perform SAXPY.
       NOTE(review): a single block of N threads exceeds the per-block
       thread limit for N > 1024, so the default N = 8M always fails the
       launch (caught and reported below). A proper fix needs a multi-block
       launch AND block-aware indexing inside saxpy — change them together.
       Also note saxpy takes `int n`, truncating N > INT_MAX. */
    saxpy<<<1,N>>>(N,a,dev_x,dev_y);
    // make the host block until the device is finished
    cudaDeviceSynchronize();
    // check for error
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    cudaMemcpy(y,dev_y,N*sizeof(float),cudaMemcpyDeviceToHost);
    /* Sample results: y[i] = a*x[i] + y[i] = 5*i + 10*i = 15*i */
    i=100;
    printf("y[%d]=%f, y[%lld]=%f\n",i,y[i],N-1,y[N-1]);
    cudaFree(dev_x);
    cudaFree(dev_y);
    /* Release host buffers too (previously leaked). */
    free(x);
    free(y);
    return 0;
}
|
70bd11fe0e54398ce2cf838f7c22e7b17e417b7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <iostream>
#include <linalg/reduce_rows_by_key.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
/*
 * Naive reference implementation of reduce_rows_by_key, used to validate
 * the optimized version under test.
 * One thread per (key, column) pair: the x dimension covers columns, the
 * y dimension covers keys (the launcher below uses one grid row per key,
 * blockDim.y == 1, so this_key needs no bounds check). Each thread scans
 * ALL rows and accumulates A[r][c] — scaled by the optional per-row
 * weight — for rows whose key matches.
 * d_char_keys is accepted but never read here; it mirrors the signature
 * of the function under test.
 */
template <typename Type>
__global__ void naiveReduceRowsByKeyKernel(const Type *d_A, int lda,
uint32_t *d_keys,
const Type *d_weight,
char *d_char_keys, int nrows,
int ncols, int nkeys, Type *d_sums) {
int c = threadIdx.x + blockIdx.x * blockDim.x;  // column handled by this thread
if (c >= ncols) return;  // x grid may overshoot the column count
int this_key = threadIdx.y + blockIdx.y * blockDim.y;  // key handled by this thread
Type sum = 0.0;
// O(nrows) scan per thread: slow but simple — exactly what a reference
// implementation should be.
for (int r = 0; r < nrows; r++) {
if (this_key != d_keys[r]) continue;
Type wt = 1;  // rows count with weight 1 when no weight array is given
if (d_weight) wt = d_weight[r];
sum += d_A[lda * r + c] * wt;
}
d_sums[this_key * ncols + c] = sum;
}
/*
 * Host-side reference entry point: zeros the output then launches the
 * naive kernel on `stream`. Grid layout: one 32-thread-wide block per 32
 * columns (x), one grid row per key (y).
 */
template <typename Type>
void naiveReduceRowsByKey(const Type *d_A, int lda, uint32_t *d_keys,
                          const Type *d_weight, char *d_char_keys, int nrows,
                          int ncols, int nkeys, Type *d_sums,
                          hipStream_t stream) {
  /* Fix: use the stream-ordered memset so the zero-fill is explicitly
     ordered before the kernel below on the same stream. The old hipMemset
     ran on the default stream, relying on legacy default-stream
     synchronization for correctness. */
  hipMemsetAsync(d_sums, 0, sizeof(Type) * nkeys * ncols, stream);
  hipLaunchKernelGGL(( naiveReduceRowsByKeyKernel), dim3(dim3((ncols + 31) / 32, nkeys)), dim3(dim3(32, 1)), 0,
    stream,
    d_A, lda, d_keys, d_weight, d_char_keys, nrows, ncols, nkeys, d_sums);
}
// Parameter set for one ReduceRowTest case.
template <typename T>
struct ReduceRowsInputs {
T tolerance;  // tolerance passed to CompareApprox when matching results
int nobs;  // number of observations (input rows)
uint32_t cols;  // number of columns per observation
uint32_t nkeys;  // number of distinct keys (clusters)
unsigned long long int seed;  // RNG seed for input values and keys
bool weighted;  // whether to exercise the weighted overload
T max_weight;  // upper bound for random weights (used only if weighted)
};
// Stream-insertion operator for ReduceRowsInputs (used when test
// parameters are printed, e.g. by the test framework). Deliberately a
// no-op: it emits nothing and returns the stream unchanged.
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const ReduceRowsInputs<T> &dims) {
return os;
}
/*
 * Parameterized test fixture: generates random device-side input, computes
 * the expected result with the naive reference kernel into out_ref, runs
 * the reduce_rows_by_key implementation under test into out, and leaves
 * both for the TEST_P bodies to compare.
 */
template <typename T>
class ReduceRowTest : public ::testing::TestWithParam<ReduceRowsInputs<T>> {
 protected:
  // Builds inputs and runs both the reference and the tested implementation.
  void SetUp() override {
    params = ::testing::TestWithParam<ReduceRowsInputs<T>>::GetParam();
    // Same seed for both generators so values and keys are reproducible.
    raft::random::Rng r(params.seed);
    raft::random::Rng r_int(params.seed);
    CUDA_CHECK(hipStreamCreate(&stream));
    int nobs = params.nobs;
    uint32_t cols = params.cols;
    uint32_t nkeys = params.nkeys;
    allocate(in, nobs * cols);
    allocate(keys, nobs);
    allocate(scratch_buf, nobs);
    allocate(out_ref, nkeys * cols);
    allocate(out, nkeys * cols);
    // Input values uniform in [0, 2/nobs); keys uniform in [0, nkeys).
    r.uniform(in, nobs * cols, T(0.0), T(2.0 / nobs), stream);
    r_int.uniformInt(keys, nobs, (uint32_t)0, nkeys, stream);
    if (params.weighted) {
      allocate(weight, nobs);
      // Separate Philox-based generator for weights in [1, max_weight).
      raft::random::Rng r(params.seed, raft::random::GeneratorType::GenPhilox);
      r.uniform(weight, nobs, T(1), params.max_weight, stream);
    } else {
      weight = nullptr;  // signals the unweighted path below (and in TearDown)
    }
    // Reference result.
    naiveReduceRowsByKey(in, cols, keys, weight, scratch_buf, nobs, cols, nkeys,
                         out_ref, stream);
    // Implementation under test: weighted or unweighted overload.
    if (params.weighted) {
      reduce_rows_by_key(in, cols, keys, weight, scratch_buf, nobs, cols, nkeys,
                         out, stream);
    } else {
      reduce_rows_by_key(in, cols, keys, scratch_buf, nobs, cols, nkeys, out,
                         stream);
    }
    CUDA_CHECK(hipStreamSynchronize(stream));
  }
  // Releases all device allocations made in SetUp.
  void TearDown() override {
    CUDA_CHECK(hipFree(in));
    CUDA_CHECK(hipFree(keys));
    CUDA_CHECK(hipFree(scratch_buf));
    CUDA_CHECK(hipFree(out_ref));
    CUDA_CHECK(hipFree(out));
    // Fix: the weight buffer was previously never freed, leaking device
    // memory in every weighted test case.
    if (weight) CUDA_CHECK(hipFree(weight));
    CUDA_CHECK(hipStreamDestroy(stream));
  }
 protected:
  hipStream_t stream;
  ReduceRowsInputs<T> params;
  T *in, *out_ref, *out;  // device buffers: input, reference result, tested result
  T *weight;  // device per-row weights, or nullptr when unweighted
  uint32_t *keys;  // device per-row key array
  char *scratch_buf;  // scratch space required by reduce_rows_by_key
  int device_count = 0;
};
// ReduceRowTestF
// 128 Obs, 32 cols, 6 clusters
const std::vector<ReduceRowsInputs<float>> inputsf2 = {
{0.000001f, 128, 32, 6, 1234ULL, false},
{0.000001f, 128, 32, 6, 1234ULL, true, 1.0},
{0.000001f, 128, 32, 6, 1234ULL, true, 2.0}};
typedef ReduceRowTest<float> ReduceRowTestF;
TEST_P(ReduceRowTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestF,
::testing::ValuesIn(inputsf2));
// ReduceRowTestD
// 128 Obs, 32 cols, 6 clusters, double precision
const std::vector<ReduceRowsInputs<double>> inputsd2 = {
{0.00000001, 128, 32, 6, 1234ULL, false},
{0.00000001, 128, 32, 6, 1234ULL, true, 2.0},
{0.00000001, 128, 32, 6, 1234ULL, true, 8.0}};
typedef ReduceRowTest<double> ReduceRowTestD;
TEST_P(ReduceRowTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestD,
::testing::ValuesIn(inputsd2));
// ReduceRowTestSmallnKey
// 128 Obs, 32 cols, 3 clusters
const std::vector<ReduceRowsInputs<float>> inputsf_small_nkey = {
{0.000001f, 128, 32, 3, 1234ULL, false},
{0.000001f, 128, 32, 3, 1234ULL, true, 5.0},
{0.000001f, 128, 32, 3, 1234ULL, true, 8.0}};
typedef ReduceRowTest<float> ReduceRowTestSmallnKey;
TEST_P(ReduceRowTestSmallnKey, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestSmallnKey,
::testing::ValuesIn(inputsf_small_nkey));
// ReduceRowTestBigSpace
// 512 Obs, 1024 cols, 32 clusters, double precision
const std::vector<ReduceRowsInputs<double>> inputsd_big_space = {
{0.00000001, 512, 1024, 40, 1234ULL, false},
{0.00000001, 512, 1024, 40, 1234ULL, true, 4.0},
{0.00000001, 512, 1024, 40, 1234ULL, true, 16.0}};
typedef ReduceRowTest<double> ReduceRowTestBigSpace;
TEST_P(ReduceRowTestBigSpace, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestBigSpace,
::testing::ValuesIn(inputsd_big_space));
// ReduceRowTestManyObs
// 100000 Obs, 37 cols, 32 clusters
const std::vector<ReduceRowsInputs<float>> inputsf_many_obs = {
{0.00001f, 100000, 37, 32, 1234ULL, false},
{0.00001f, 100000, 37, 32, 1234ULL, true, 4.0},
{0.00001f, 100000, 37, 32, 1234ULL, true, 16.0}};
typedef ReduceRowTest<float> ReduceRowTestManyObs;
TEST_P(ReduceRowTestManyObs, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestManyObs,
::testing::ValuesIn(inputsf_many_obs));
// ReduceRowTestManyClusters
// 100000 Obs, 37 cols, 2048 clusters
const std::vector<ReduceRowsInputs<float>> inputsf_many_cluster = {
{0.00001f, 100000, 37, 2048, 1234ULL, false},
{0.00001f, 100000, 37, 2048, 1234ULL, true, 32.0},
{0.00001f, 100000, 37, 2048, 1234ULL, true, 16.0}};
typedef ReduceRowTest<float> ReduceRowTestManyClusters;
TEST_P(ReduceRowTestManyClusters, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestManyClusters,
::testing::ValuesIn(inputsf_many_cluster));
} // end namespace LinAlg
} // end namespace MLCommon
| 70bd11fe0e54398ce2cf838f7c22e7b17e417b7c.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <iostream>
#include <linalg/reduce_rows_by_key.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
/*
 * Naive reference implementation of reduce_rows_by_key, used to validate
 * the optimized version under test.
 * One thread per (key, column) pair: the x dimension covers columns, the
 * y dimension covers keys (the launcher below uses one grid row per key,
 * blockDim.y == 1, so this_key needs no bounds check). Each thread scans
 * ALL rows and accumulates A[r][c] — scaled by the optional per-row
 * weight — for rows whose key matches.
 * d_char_keys is accepted but never read here; it mirrors the signature
 * of the function under test.
 */
template <typename Type>
__global__ void naiveReduceRowsByKeyKernel(const Type *d_A, int lda,
uint32_t *d_keys,
const Type *d_weight,
char *d_char_keys, int nrows,
int ncols, int nkeys, Type *d_sums) {
int c = threadIdx.x + blockIdx.x * blockDim.x;  // column handled by this thread
if (c >= ncols) return;  // x grid may overshoot the column count
int this_key = threadIdx.y + blockIdx.y * blockDim.y;  // key handled by this thread
Type sum = 0.0;
// O(nrows) scan per thread: slow but simple — exactly what a reference
// implementation should be.
for (int r = 0; r < nrows; r++) {
if (this_key != d_keys[r]) continue;
Type wt = 1;  // rows count with weight 1 when no weight array is given
if (d_weight) wt = d_weight[r];
sum += d_A[lda * r + c] * wt;
}
d_sums[this_key * ncols + c] = sum;
}
/*
 * Host-side reference entry point: zeros the output then launches the
 * naive kernel on `stream`. Grid layout: one 32-thread-wide block per 32
 * columns (x), one grid row per key (y).
 */
template <typename Type>
void naiveReduceRowsByKey(const Type *d_A, int lda, uint32_t *d_keys,
                          const Type *d_weight, char *d_char_keys, int nrows,
                          int ncols, int nkeys, Type *d_sums,
                          cudaStream_t stream) {
  /* Fix: use the stream-ordered memset so the zero-fill is explicitly
     ordered before the kernel below on the same stream. The old cudaMemset
     ran on the default stream, relying on legacy default-stream
     synchronization for correctness. */
  cudaMemsetAsync(d_sums, 0, sizeof(Type) * nkeys * ncols, stream);
  naiveReduceRowsByKeyKernel<<<dim3((ncols + 31) / 32, nkeys), dim3(32, 1), 0,
                               stream>>>(
    d_A, lda, d_keys, d_weight, d_char_keys, nrows, ncols, nkeys, d_sums);
}
// Parameter set for one ReduceRowTest case.
template <typename T>
struct ReduceRowsInputs {
T tolerance;  // tolerance passed to CompareApprox when matching results
int nobs;  // number of observations (input rows)
uint32_t cols;  // number of columns per observation
uint32_t nkeys;  // number of distinct keys (clusters)
unsigned long long int seed;  // RNG seed for input values and keys
bool weighted;  // whether to exercise the weighted overload
T max_weight;  // upper bound for random weights (used only if weighted)
};
// Stream-insertion operator for ReduceRowsInputs (used when test
// parameters are printed, e.g. by the test framework). Deliberately a
// no-op: it emits nothing and returns the stream unchanged.
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const ReduceRowsInputs<T> &dims) {
return os;
}
/*
 * Parameterized test fixture: generates random device-side input, computes
 * the expected result with the naive reference kernel into out_ref, runs
 * the reduce_rows_by_key implementation under test into out, and leaves
 * both for the TEST_P bodies to compare.
 */
template <typename T>
class ReduceRowTest : public ::testing::TestWithParam<ReduceRowsInputs<T>> {
 protected:
  // Builds inputs and runs both the reference and the tested implementation.
  void SetUp() override {
    params = ::testing::TestWithParam<ReduceRowsInputs<T>>::GetParam();
    // Same seed for both generators so values and keys are reproducible.
    raft::random::Rng r(params.seed);
    raft::random::Rng r_int(params.seed);
    CUDA_CHECK(cudaStreamCreate(&stream));
    int nobs = params.nobs;
    uint32_t cols = params.cols;
    uint32_t nkeys = params.nkeys;
    allocate(in, nobs * cols);
    allocate(keys, nobs);
    allocate(scratch_buf, nobs);
    allocate(out_ref, nkeys * cols);
    allocate(out, nkeys * cols);
    // Input values uniform in [0, 2/nobs); keys uniform in [0, nkeys).
    r.uniform(in, nobs * cols, T(0.0), T(2.0 / nobs), stream);
    r_int.uniformInt(keys, nobs, (uint32_t)0, nkeys, stream);
    if (params.weighted) {
      allocate(weight, nobs);
      // Separate Philox-based generator for weights in [1, max_weight).
      raft::random::Rng r(params.seed, raft::random::GeneratorType::GenPhilox);
      r.uniform(weight, nobs, T(1), params.max_weight, stream);
    } else {
      weight = nullptr;  // signals the unweighted path below (and in TearDown)
    }
    // Reference result.
    naiveReduceRowsByKey(in, cols, keys, weight, scratch_buf, nobs, cols, nkeys,
                         out_ref, stream);
    // Implementation under test: weighted or unweighted overload.
    if (params.weighted) {
      reduce_rows_by_key(in, cols, keys, weight, scratch_buf, nobs, cols, nkeys,
                         out, stream);
    } else {
      reduce_rows_by_key(in, cols, keys, scratch_buf, nobs, cols, nkeys, out,
                         stream);
    }
    CUDA_CHECK(cudaStreamSynchronize(stream));
  }
  // Releases all device allocations made in SetUp.
  void TearDown() override {
    CUDA_CHECK(cudaFree(in));
    CUDA_CHECK(cudaFree(keys));
    CUDA_CHECK(cudaFree(scratch_buf));
    CUDA_CHECK(cudaFree(out_ref));
    CUDA_CHECK(cudaFree(out));
    // Fix: the weight buffer was previously never freed, leaking device
    // memory in every weighted test case.
    if (weight) CUDA_CHECK(cudaFree(weight));
    CUDA_CHECK(cudaStreamDestroy(stream));
  }
 protected:
  cudaStream_t stream;
  ReduceRowsInputs<T> params;
  T *in, *out_ref, *out;  // device buffers: input, reference result, tested result
  T *weight;  // device per-row weights, or nullptr when unweighted
  uint32_t *keys;  // device per-row key array
  char *scratch_buf;  // scratch space required by reduce_rows_by_key
  int device_count = 0;
};
// ReduceRowTestF
// 128 Obs, 32 cols, 6 clusters
const std::vector<ReduceRowsInputs<float>> inputsf2 = {
{0.000001f, 128, 32, 6, 1234ULL, false},
{0.000001f, 128, 32, 6, 1234ULL, true, 1.0},
{0.000001f, 128, 32, 6, 1234ULL, true, 2.0}};
typedef ReduceRowTest<float> ReduceRowTestF;
TEST_P(ReduceRowTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestF,
::testing::ValuesIn(inputsf2));
// ReduceRowTestD
// 128 Obs, 32 cols, 6 clusters, double precision
const std::vector<ReduceRowsInputs<double>> inputsd2 = {
{0.00000001, 128, 32, 6, 1234ULL, false},
{0.00000001, 128, 32, 6, 1234ULL, true, 2.0},
{0.00000001, 128, 32, 6, 1234ULL, true, 8.0}};
typedef ReduceRowTest<double> ReduceRowTestD;
TEST_P(ReduceRowTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestD,
::testing::ValuesIn(inputsd2));
// ReduceRowTestSmallnKey
// 128 Obs, 32 cols, 3 clusters
const std::vector<ReduceRowsInputs<float>> inputsf_small_nkey = {
{0.000001f, 128, 32, 3, 1234ULL, false},
{0.000001f, 128, 32, 3, 1234ULL, true, 5.0},
{0.000001f, 128, 32, 3, 1234ULL, true, 8.0}};
typedef ReduceRowTest<float> ReduceRowTestSmallnKey;
TEST_P(ReduceRowTestSmallnKey, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestSmallnKey,
::testing::ValuesIn(inputsf_small_nkey));
// ReduceRowTestBigSpace
// 512 Obs, 1024 cols, 32 clusters, double precision
const std::vector<ReduceRowsInputs<double>> inputsd_big_space = {
{0.00000001, 512, 1024, 40, 1234ULL, false},
{0.00000001, 512, 1024, 40, 1234ULL, true, 4.0},
{0.00000001, 512, 1024, 40, 1234ULL, true, 16.0}};
typedef ReduceRowTest<double> ReduceRowTestBigSpace;
TEST_P(ReduceRowTestBigSpace, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestBigSpace,
::testing::ValuesIn(inputsd_big_space));
// ReduceRowTestManyObs
// 100000 Obs, 37 cols, 32 clusters
const std::vector<ReduceRowsInputs<float>> inputsf_many_obs = {
{0.00001f, 100000, 37, 32, 1234ULL, false},
{0.00001f, 100000, 37, 32, 1234ULL, true, 4.0},
{0.00001f, 100000, 37, 32, 1234ULL, true, 16.0}};
typedef ReduceRowTest<float> ReduceRowTestManyObs;
TEST_P(ReduceRowTestManyObs, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestManyObs,
::testing::ValuesIn(inputsf_many_obs));
// ReduceRowTestManyClusters
// 100000 Obs, 37 cols, 2048 clusters
const std::vector<ReduceRowsInputs<float>> inputsf_many_cluster = {
{0.00001f, 100000, 37, 2048, 1234ULL, false},
{0.00001f, 100000, 37, 2048, 1234ULL, true, 32.0},
{0.00001f, 100000, 37, 2048, 1234ULL, true, 16.0}};
typedef ReduceRowTest<float> ReduceRowTestManyClusters;
TEST_P(ReduceRowTestManyClusters, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.cols * params.nkeys,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestManyClusters,
::testing::ValuesIn(inputsf_many_cluster));
} // end namespace LinAlg
} // end namespace MLCommon
|
cffb31b17c76469a130bbe00766993c7b90b178f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Discrete Sine Transform in Column wise (DST two)
* DST_II_Column
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_II_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_II_Column.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
/*
 * Column-wise DST-II as a tiled matrix product: C = S * A, where
 * S[Row][j] = sin((Row+0.5)*pi*(j+1)/numARows) * sqrt((2-delta(j+1,numARows))/numARows)
 * is generated on the fly (never materialized) and A is numARows x
 * numAColumns, row-major. Launched with blockDim == (TILE_DIM, TILE_DIM).
 *
 * Fix: use float literals and sqrtf so the basis computation stays in
 * single precision — the old 0.5 / 2.0 / sqrt promoted the whole
 * expression to double inside a float kernel.
 */
template <unsigned int TILE_DIM > __global__ void DSTII_Column_Kernel(float *A, float *C,
    int numARows, int numAColumns,
    int numCRows, int numCColumns)
{
    float CValue = 0.0f;
    const float PI_d = 3.141592653589793238462643383279502884f; // pi
    int Row = blockIdx.y*TILE_DIM + threadIdx.y;
    int Col = blockIdx.x*TILE_DIM + threadIdx.x;
    __shared__ float As[TILE_DIM][TILE_DIM];  // tile of the DST basis S
    __shared__ float Bs[TILE_DIM][TILE_DIM];  // tile of the input A
    // March over the shared (inner) dimension, one TILE_DIM slab at a time.
    for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
        int j = threadIdx.x + k*TILE_DIM;  // inner index into the basis row
        if (j < numARows && Row < numARows) {
            As[threadIdx.y][threadIdx.x] =
                sinf(((Row + 0.5f)*PI_d*(j + 1)) / numARows) *
                sqrtf((2.0f - DELTA(j + 1, numARows)) / numARows);
        } else {
            As[threadIdx.y][threadIdx.x] = 0.0f;  // zero-pad partial tiles
        }
        if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns) {
            Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col];
        } else {
            Bs[threadIdx.y][threadIdx.x] = 0.0f;
        }
        __syncthreads();  // tiles fully loaded before use
        for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
        __syncthreads();  // all reads done before the next load overwrites
    }
    if (Row < numCRows && Col < numCColumns) {
        C[Row*numCColumns + Col] = CValue;
    }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
/*
 * Host wrapper for the column-wise DST-II: copies A to the device, runs
 * DSTII_Column_Kernel (tiled shared-memory product), and copies the result
 * back into C. A is numARows x numAColumns, row-major; C has the same
 * shape (the incoming numCRows/numCColumns values are overwritten from A's
 * dimensions, as before).
 *
 * Fix: the alloc/copy/launch/copy/free sequence was duplicated verbatim in
 * a switch over the tile size; only the kernel's template argument differs,
 * so everything else is now shared. The dead local `unsigned int TILE_DIM`
 * was removed.
 */
extern "C" void CalculateTransformDSTColumnTwo(float * A, float * C, int numARows,
    int numAColumns, int numCRows, int numCColumns)
{
    float * hostA = A;  // host input matrix
    float * hostC = C;  // host output matrix
    float * deviceA;
    float * deviceC;
    hipError_t error;
    int devID = 0;
    // Query the device: pre-Fermi parts (compute capability < 2) are
    // limited to 16x16 tiles; newer parts use 32x32.
    error = hipGetDevice(&devID);
    hipDeviceProp_t deviceProp;
    error = hipGetDeviceProperties(&deviceProp, devID);
    if (error != hipSuccess)
    {
        printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
    // Output has the same shape as the input.
    numCRows = numARows;
    numCColumns = numAColumns;
    gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
    gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
    gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice));
    // Launch geometry: one TILEDIM x TILEDIM block per output tile
    // (ceil division covers partial edge tiles).
    dim3 dimBlock(TILEDIM, TILEDIM, 1);
    dim3 dimGrid((numCColumns + dimBlock.x - 1) / dimBlock.x,
                 (numCRows + dimBlock.y - 1) / dimBlock.y);
    // Only the compile-time tile size differs between the two launches.
    if (TILEDIM == 16) {
        DSTII_Column_Kernel <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
    } else {
        DSTII_Column_Kernel <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
    }
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipDeviceSynchronize());
    // Copy the results in GPU memory back to the CPU
    gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
    C = hostC;
    gpuErrchk(hipFree(deviceA));
    gpuErrchk(hipFree(deviceC));
}
| cffb31b17c76469a130bbe00766993c7b90b178f.cu | /*
* Discrete Sine Transform in Column wise (DST two)
* DST_II_Column
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_II_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_II_Column.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Abort (by default) with a readable message when a CUDA API call fails.
// `file` is now const-qualified: the gpuErrchk macro passes __FILE__, a string
// literal, and binding a string literal to non-const char* is ill-formed in
// C++11 and later. `code` doubles as the process exit status on failure.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
// Column-wise DST-II as a tiled matrix product: C = S * A, where S is the
// numARows x numARows DST-II basis generated on the fly (never materialized)
// and A is numARows x numAColumns. One thread computes one element of C.
// Expects a TILE_DIM x TILE_DIM block layout; the grid may overhang, the
// final guard discards out-of-range threads.
template <unsigned int TILE_DIM > __global__ void DSTII_Column_Kernel(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
// Output coordinate handled by this thread.
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
// As: tile of the synthesized DST basis; Bs: tile of the input matrix.
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
// March over the shared (inner) dimension in TILE_DIM-wide steps,
// rounding up so a partial last tile is still processed (zero-padded).
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
//As[threadIdx.y][threadIdx.x] = cosf(((2 * (threadIdx.x + (k*TILE_DIM)) + 1) / (2.0 * numARows))*PI_d*Row)*sqrt(1.0 / (1 + DELTA(1, Row + 1)))*sqrt(2.0 / numARows);
// Synthesize one basis element: sin((i+0.5)*pi*j/N) with the DST-II
// normalization sqrt((2 - delta(j,N))/N). NOTE(review): sqrt/2.0 here
// are double-precision; intentional precision choice — confirm.
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = sinf(((Row + 0.5)*PI_d*((threadIdx.x + (k*TILE_DIM)) + 1)) / (numARows))*sqrt((2.0 - DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)) / (numARows)); }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
// Stage the matching input tile; pad out-of-range entries with zero.
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
// All tile writes must land before any thread reads them.
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
// Keep the tiles stable until every thread finishes reading.
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// DST-II host driver: allocates device buffers, launches the tiled
// transform kernel, and copies the result back. Matrix dimensions need
// not be multiples of the tile size; the grid rounds up and the kernel
// zero-pads partial tiles.
// Applies the column-wise DST-II to the numARows x numAColumns host matrix A,
// writing the (same-shaped) result into C. Picks the tile size from the GPU's
// compute capability and launches the matching kernel template instantiation.
//
// Fixes vs. the previous revision: the two switch cases duplicated ~30
// identical lines of copy/launch/free logic (now shared); the dead
// `C = hostC;` assignment to a by-value parameter is gone; the unused host
// `TILE_DIM` variable is gone; and an unmatched tile size can no longer
// leak the device buffers, since cleanup is on the single common path.
extern "C" void CalculateTransformDSTColumnTwo(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
float * hostC = C; // The output C matrix
float * deviceA;
float * deviceC;
cudaError_t error;
int devID = 0;
// get number of SMs on this GPU
error = cudaGetDevice(&devID);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Pre-Fermi devices (compute capability < 2) get 16x16 tiles, newer GPUs 32x32.
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// The transform preserves the shape of A.
numCRows = numARows;
numCColumns = numAColumns;
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
// Copy the input to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice));
// One launch configuration serves both tile sizes; only the template
// argument (a compile-time tile / shared-memory size) differs.
dim3 dimBlock(TILEDIM, TILEDIM, 1);
dim3 dimGrid((numCColumns + TILEDIM - 1) / TILEDIM,
(numCRows + TILEDIM - 1) / TILEDIM, 1);
if (TILEDIM == 16) {
DSTII_Column_Kernel<16><<<dimGrid, dimBlock>>>(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
} else {
DSTII_Column_Kernel<32><<<dimGrid, dimBlock>>>(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
}
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(deviceA));
gpuErrchk(cudaFree(deviceC));
}
|
d3311515afb7a8180f241102e2595900f18d4620.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parallel.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a diagnostic for a failed HIP API call and, unless abort=false,
// terminate the process with the HIP error code as the exit status.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Fill all MAX_BINS device counters with `value` — one thread per bin;
// extra threads from the rounded-up grid return without touching memory.
__global__ void initialize(u32 *d_out, u32 value){
u32 id = threadIdx.x + blockIdx.x * blockDim.x;
if (id > MAX_BINS - 1) return;
d_out[id] = value;
}
// One thread per input byte: read the sample and atomically bump the bin it
// addresses. (A long commented-out shared-memory privatization experiment was
// removed; the plain global atomicAdd below was already the live code path.)
__global__ void m_hist(u8 *d_in, u32 *d_out, u32 length) {
u32 id = threadIdx.x + blockIdx.x * blockDim.x;
// Guard with `id >= length` rather than `id > length - 1`: `length` is
// unsigned, so `length - 1` underflows to UINT_MAX when length == 0 and
// previously let every thread read d_in out of bounds.
if (id >= length) return;
u32 bin = d_in[id]; // u8 widens to u32; valid bins are 0..MAX_BINS-1
atomicAdd(&d_out[bin], 1);
}
// Sequential CPU reference: histogram `length` byte samples into h_out.
// NOTE(review): assumes h_out arrives zero-initialized by the caller — confirm.
// Always returns 0.
int histogram(u8 *h_in, u32 *h_out, u32 length) {
for (int i = 0; i < length; i++) {
h_out[h_in[i]]++;
}
return 0;
}
int m_histogram(u8 *h_in, u32 *h_out, u32 length) {
/**
 * Build the MAX_BINS histogram of h_in on the GPU, writing counts to h_out,
 * and time the whole round trip (alloc + copies + kernels) with HIP events.
 * Returns 0.
 *
 * Fix vs. previous revision: error checking was inconsistent — only the
 * host-to-device memcpy was wrapped in gpuErrchk while mallocs, kernel
 * launches, the result copy and every event call went unchecked. All HIP
 * calls are now checked, and hipGetLastError() is queried after each launch.
 */
hipEvent_t start, stop;
float time;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipEventRecord(start, 0));
// Device-side bins [0-255].
u32 *d_out;
gpuErrchk(hipMalloc((void **) &d_out, MAX_BINS * sizeof(u32)));
// Zero the bins on the device.
dim3 grid = dim3((int) ceil((float) MAX_BINS/MAX_THREADS));
dim3 block = dim3(MAX_THREADS, 1, 1);
hipLaunchKernelGGL(( initialize), dim3(grid), dim3(block), 0, 0, d_out, 0);
gpuErrchk(hipGetLastError());
// Copy the host samples to the device.
u8 *d_in;
gpuErrchk(hipMalloc((void **) &d_in, length * sizeof(u8)));
gpuErrchk(hipMemcpy(d_in, h_in, length * sizeof(u8), hipMemcpyHostToDevice));
// One thread per sample; each atomically bumps its bin.
grid = dim3((int) ceil((float) length/MAX_THREADS));
// NOTE(review): m_hist no longer uses shared memory, so this cache
// preference is moot — kept for parity with the original behavior.
hipFuncSetCacheConfig(m_hist, hipFuncCachePreferShared);
hipLaunchKernelGGL(( m_hist), dim3(grid), dim3(block), 0, 0, d_in, d_out, length);
gpuErrchk(hipGetLastError());
gpuErrchk(hipMemcpy(h_out, d_out, MAX_BINS * sizeof(u32), hipMemcpyDeviceToHost));
gpuErrchk(hipEventRecord(stop, 0));
gpuErrchk(hipEventSynchronize(stop));
gpuErrchk(hipEventElapsedTime(&time, start, stop));
//std::cout<<"Time taken "<<time<<std::endl;
gpuErrchk(hipEventDestroy(start));
gpuErrchk(hipEventDestroy(stop));
gpuErrchk(hipFree(d_in));
gpuErrchk(hipFree(d_out));
return 0;
}
| d3311515afb7a8180f241102e2595900f18d4620.cu | #include "parallel.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call to stderr; by default terminate the process,
// reusing the CUDA error code as the exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code == cudaSuccess) return;
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
// Set every one of the MAX_BINS device counters to `value`.
// One thread per bin; threads past the last bin do nothing.
__global__ void initialize(u32 *d_out, u32 value){
u32 idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < MAX_BINS) {
d_out[idx] = value;
}
}
// One thread per input byte: read the sample and atomically bump the bin it
// addresses. (A long commented-out shared-memory privatization experiment was
// removed; the plain global atomicAdd below was already the live code path.)
__global__ void m_hist(u8 *d_in, u32 *d_out, u32 length) {
u32 id = threadIdx.x + blockIdx.x * blockDim.x;
// Guard with `id >= length` rather than `id > length - 1`: `length` is
// unsigned, so `length - 1` underflows to UINT_MAX when length == 0 and
// previously let every thread read d_in out of bounds.
if (id >= length) return;
u32 bin = d_in[id]; // u8 widens to u32; valid bins are 0..MAX_BINS-1
atomicAdd(&d_out[bin], 1);
}
// Sequential CPU reference implementation: one pass over the samples,
// bumping the counter addressed by each byte. The caller is expected to
// hand in a zeroed h_out. Always returns 0.
int histogram(u8 *h_in, u32 *h_out, u32 length) {
u32 idx = 0;
while (idx < length) {
++h_out[h_in[idx]];
++idx;
}
return 0;
}
int m_histogram(u8 *h_in, u32 *h_out, u32 length) {
/**
 * Build the MAX_BINS histogram of h_in on the GPU, writing counts to h_out,
 * and time the whole round trip (alloc + copies + kernels) with CUDA events.
 * Returns 0.
 *
 * Fix vs. previous revision: error checking was inconsistent — only the
 * host-to-device memcpy was wrapped in gpuErrchk while mallocs, kernel
 * launches, the result copy and every event call went unchecked. All CUDA
 * calls are now checked, and cudaGetLastError() is queried after each launch.
 */
cudaEvent_t start, stop;
float time;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaEventRecord(start, 0));
// Device-side bins [0-255].
u32 *d_out;
gpuErrchk(cudaMalloc((void **) &d_out, MAX_BINS * sizeof(u32)));
// Zero the bins on the device.
dim3 grid = dim3((int) ceil((float) MAX_BINS/MAX_THREADS));
dim3 block = dim3(MAX_THREADS, 1, 1);
initialize<<<grid, block>>>(d_out, 0);
gpuErrchk(cudaGetLastError());
// Copy the host samples to the device.
u8 *d_in;
gpuErrchk(cudaMalloc((void **) &d_in, length * sizeof(u8)));
gpuErrchk(cudaMemcpy(d_in, h_in, length * sizeof(u8), cudaMemcpyHostToDevice));
// One thread per sample; each atomically bumps its bin.
grid = dim3((int) ceil((float) length/MAX_THREADS));
// NOTE(review): m_hist no longer uses shared memory, so this cache
// preference is moot — kept for parity with the original behavior.
cudaFuncSetCacheConfig(m_hist, cudaFuncCachePreferShared);
m_hist<<<grid, block>>>(d_in, d_out, length);
gpuErrchk(cudaGetLastError());
gpuErrchk(cudaMemcpy(h_out, d_out, MAX_BINS * sizeof(u32), cudaMemcpyDeviceToHost));
gpuErrchk(cudaEventRecord(stop, 0));
gpuErrchk(cudaEventSynchronize(stop));
gpuErrchk(cudaEventElapsedTime(&time, start, stop));
//std::cout<<"Time taken "<<time<<std::endl;
gpuErrchk(cudaEventDestroy(start));
gpuErrchk(cudaEventDestroy(stop));
gpuErrchk(cudaFree(d_in));
gpuErrchk(cudaFree(d_out));
return 0;
}
|
99cd2d4158fdd9950474f7ec04fb1841dba7f5d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <hipcub/hipcub.hpp>
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace random {
using namespace raft::random;
enum RandomType { RNG_Uniform };
template <typename T, int TPB>
__global__ void meanKernel(float* out, const T* data, int len)
{
typedef hipcub::BlockReduce<float, TPB> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float val = tid < len ? data[tid] : T(0);
float x = BlockReduce(temp_storage).Sum(val);
__syncthreads();
float xx = BlockReduce(temp_storage).Sum(val * val);
__syncthreads();
if (threadIdx.x == 0) {
raft::myAtomicAdd(out, x);
raft::myAtomicAdd(out + 1, xx);
}
}
template <typename T>
struct RngInputs {
float tolerance;
int len;
// start, end: for uniform
// mean, sigma: for normal/lognormal
// mean, beta: for gumbel
// mean, scale: for logistic and laplace
// lambda: for exponential
// sigma: for rayleigh
T start, end;
RandomType type;
GeneratorType gtype;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RngInputs<T>& dims)
{
return os;
}
template <typename T>
class RngTest : public ::testing::TestWithParam<RngInputs<T>> {
public:
RngTest()
: params(::testing::TestWithParam<RngInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
data(0, stream),
stats(2, stream)
{
data.resize(params.len, stream);
RAFT_CUDA_TRY(hipMemsetAsync(stats.data(), 0, 2 * sizeof(float), stream));
}
protected:
void SetUp() override
{
RngState r(params.seed, params.gtype);
switch (params.type) {
case RNG_Uniform:
uniformInt(handle, r, data.data(), params.len, params.start, params.end);
break;
};
static const int threads = 128;
hipLaunchKernelGGL(( meanKernel<T, threads>), dim3(raft::ceildiv(params.len, threads)), dim3(threads), 0, stream,
stats.data(), data.data(), params.len);
update_host<float>(h_stats, stats.data(), 2, stream);
resource::sync_stream(handle, stream);
h_stats[0] /= params.len;
h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]);
resource::sync_stream(handle, stream);
}
void getExpectedMeanVar(float meanvar[2])
{
switch (params.type) {
case RNG_Uniform:
meanvar[0] = (params.start + params.end) * 0.5f;
meanvar[1] = params.end - params.start;
meanvar[1] = meanvar[1] * meanvar[1] / 12.f;
break;
};
}
protected:
raft::resources handle;
hipStream_t stream;
RngInputs<T> params;
rmm::device_uvector<T> data;
rmm::device_uvector<float> stats;
float h_stats[2]; // mean, var
};
template <typename T>
class RngMdspanTest : public ::testing::TestWithParam<RngInputs<T>> {
public:
RngMdspanTest()
: params(::testing::TestWithParam<RngInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
data(0, stream),
stats(2, stream)
{
data.resize(params.len, stream);
RAFT_CUDA_TRY(hipMemsetAsync(stats.data(), 0, 2 * sizeof(float), stream));
}
protected:
void SetUp() override
{
RngState r(params.seed, params.gtype);
raft::device_vector_view<T> data_view(data.data(), data.size());
switch (params.type) {
case RNG_Uniform: uniformInt(handle, r, data_view, params.start, params.end); break;
};
static const int threads = 128;
hipLaunchKernelGGL(( meanKernel<T, threads>), dim3(raft::ceildiv(params.len, threads)), dim3(threads), 0, stream,
stats.data(), data.data(), params.len);
update_host<float>(h_stats, stats.data(), 2, stream);
resource::sync_stream(handle, stream);
h_stats[0] /= params.len;
h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]);
resource::sync_stream(handle, stream);
}
void getExpectedMeanVar(float meanvar[2])
{
switch (params.type) {
case RNG_Uniform:
meanvar[0] = (params.start + params.end) * 0.5f;
meanvar[1] = params.end - params.start;
meanvar[1] = meanvar[1] * meanvar[1] / 12.f;
break;
};
}
protected:
raft::resources handle;
hipStream_t stream;
RngInputs<T> params;
rmm::device_uvector<T> data;
rmm::device_uvector<float> stats;
float h_stats[2]; // mean, var
};
const std::vector<RngInputs<uint32_t>> inputs_u32 = {
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};
using RngTestU32 = RngTest<uint32_t>;
TEST_P(RngTestU32, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestU32, ::testing::ValuesIn(inputs_u32));
using RngMdspanTestU32 = RngMdspanTest<uint32_t>;
TEST_P(RngMdspanTestU32, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestU32, ::testing::ValuesIn(inputs_u32));
const std::vector<RngInputs<uint64_t>> inputs_u64 = {
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};
using RngTestU64 = RngTest<uint64_t>;
TEST_P(RngTestU64, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestU64, ::testing::ValuesIn(inputs_u64));
using RngMdspanTestU64 = RngMdspanTest<uint64_t>;
TEST_P(RngMdspanTestU64, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestU64, ::testing::ValuesIn(inputs_u64));
const std::vector<RngInputs<int32_t>> inputs_s32 = {
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};
using RngTestS32 = RngTest<int32_t>;
TEST_P(RngTestS32, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestS32, ::testing::ValuesIn(inputs_s32));
using RngMdspanTestS32 = RngMdspanTest<int32_t>;
TEST_P(RngMdspanTestS32, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestS32, ::testing::ValuesIn(inputs_s32));
const std::vector<RngInputs<int64_t>> inputs_s64 = {
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};
using RngTestS64 = RngTest<int64_t>;
TEST_P(RngTestS64, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestS64, ::testing::ValuesIn(inputs_s64));
using RngMdspanTestS64 = RngMdspanTest<int64_t>;
TEST_P(RngMdspanTestS64, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestS64, ::testing::ValuesIn(inputs_s64));
} // namespace random
} // namespace raft
| 99cd2d4158fdd9950474f7ec04fb1841dba7f5d0.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <cub/cub.cuh>
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace random {
using namespace raft::random;
enum RandomType { RNG_Uniform };
// Accumulate sum(data) into out[0] and sum(data^2) into out[1] so the host
// can derive mean and variance. Each block reduces its slice with CUB
// (the aggregate is returned on thread 0, per BlockReduce semantics) and
// thread 0 folds it into the global totals atomically.
// Expects out[0..1] to be zeroed before launch; blockDim.x must equal TPB.
template <typename T, int TPB>
__global__ void meanKernel(float* out, const T* data, int len)
{
typedef cub::BlockReduce<float, TPB> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Out-of-range threads contribute a neutral zero.
float val = tid < len ? data[tid] : T(0);
float x = BlockReduce(temp_storage).Sum(val);
// temp_storage is reused by the second reduction; barrier required between.
__syncthreads();
float xx = BlockReduce(temp_storage).Sum(val * val);
__syncthreads();
if (threadIdx.x == 0) {
raft::myAtomicAdd(out, x);
raft::myAtomicAdd(out + 1, xx);
}
}
template <typename T>
struct RngInputs {
float tolerance;
int len;
// start, end: for uniform
// mean, sigma: for normal/lognormal
// mean, beta: for gumbel
// mean, scale: for logistic and laplace
// lambda: for exponential
// sigma: for rayleigh
T start, end;
RandomType type;
GeneratorType gtype;
unsigned long long int seed;
};
// Pretty-print the test parameters. gtest calls this when a parameterized
// test fails; the previous version ignored `dims` and printed nothing,
// which made failure output useless for identifying the offending case.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RngInputs<T>& dims)
{
os << "{tolerance=" << dims.tolerance << ", len=" << dims.len
<< ", start=" << dims.start << ", end=" << dims.end
<< ", type=" << static_cast<int>(dims.type)
<< ", gtype=" << static_cast<int>(dims.gtype)
<< ", seed=" << dims.seed << "}";
return os;
}
template <typename T>
class RngTest : public ::testing::TestWithParam<RngInputs<T>> {
public:
RngTest()
: params(::testing::TestWithParam<RngInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
data(0, stream),
stats(2, stream)
{
data.resize(params.len, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(stats.data(), 0, 2 * sizeof(float), stream));
}
protected:
void SetUp() override
{
RngState r(params.seed, params.gtype);
switch (params.type) {
case RNG_Uniform:
uniformInt(handle, r, data.data(), params.len, params.start, params.end);
break;
};
static const int threads = 128;
meanKernel<T, threads><<<raft::ceildiv(params.len, threads), threads, 0, stream>>>(
stats.data(), data.data(), params.len);
update_host<float>(h_stats, stats.data(), 2, stream);
resource::sync_stream(handle, stream);
h_stats[0] /= params.len;
h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]);
resource::sync_stream(handle, stream);
}
void getExpectedMeanVar(float meanvar[2])
{
switch (params.type) {
case RNG_Uniform:
meanvar[0] = (params.start + params.end) * 0.5f;
meanvar[1] = params.end - params.start;
meanvar[1] = meanvar[1] * meanvar[1] / 12.f;
break;
};
}
protected:
raft::resources handle;
cudaStream_t stream;
RngInputs<T> params;
rmm::device_uvector<T> data;
rmm::device_uvector<float> stats;
float h_stats[2]; // mean, var
};
template <typename T>
class RngMdspanTest : public ::testing::TestWithParam<RngInputs<T>> {
public:
RngMdspanTest()
: params(::testing::TestWithParam<RngInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
data(0, stream),
stats(2, stream)
{
data.resize(params.len, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(stats.data(), 0, 2 * sizeof(float), stream));
}
protected:
void SetUp() override
{
RngState r(params.seed, params.gtype);
raft::device_vector_view<T> data_view(data.data(), data.size());
switch (params.type) {
case RNG_Uniform: uniformInt(handle, r, data_view, params.start, params.end); break;
};
static const int threads = 128;
meanKernel<T, threads><<<raft::ceildiv(params.len, threads), threads, 0, stream>>>(
stats.data(), data.data(), params.len);
update_host<float>(h_stats, stats.data(), 2, stream);
resource::sync_stream(handle, stream);
h_stats[0] /= params.len;
h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]);
resource::sync_stream(handle, stream);
}
void getExpectedMeanVar(float meanvar[2])
{
switch (params.type) {
case RNG_Uniform:
meanvar[0] = (params.start + params.end) * 0.5f;
meanvar[1] = params.end - params.start;
meanvar[1] = meanvar[1] * meanvar[1] / 12.f;
break;
};
}
protected:
raft::resources handle;
cudaStream_t stream;
RngInputs<T> params;
rmm::device_uvector<T> data;
rmm::device_uvector<float> stats;
float h_stats[2]; // mean, var
};
const std::vector<RngInputs<uint32_t>> inputs_u32 = {
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};
using RngTestU32 = RngTest<uint32_t>;
TEST_P(RngTestU32, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestU32, ::testing::ValuesIn(inputs_u32));
using RngMdspanTestU32 = RngMdspanTest<uint32_t>;
TEST_P(RngMdspanTestU32, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestU32, ::testing::ValuesIn(inputs_u32));
const std::vector<RngInputs<uint64_t>> inputs_u64 = {
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};
using RngTestU64 = RngTest<uint64_t>;
TEST_P(RngTestU64, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestU64, ::testing::ValuesIn(inputs_u64));
using RngMdspanTestU64 = RngMdspanTest<uint64_t>;
TEST_P(RngMdspanTestU64, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestU64, ::testing::ValuesIn(inputs_u64));
const std::vector<RngInputs<int32_t>> inputs_s32 = {
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};
using RngTestS32 = RngTest<int32_t>;
TEST_P(RngTestS32, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestS32, ::testing::ValuesIn(inputs_s32));
using RngMdspanTestS32 = RngMdspanTest<int32_t>;
TEST_P(RngMdspanTestS32, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestS32, ::testing::ValuesIn(inputs_s32));
const std::vector<RngInputs<int64_t>> inputs_s64 = {
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
{0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
{0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};
using RngTestS64 = RngTest<int64_t>;
TEST_P(RngTestS64, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestS64, ::testing::ValuesIn(inputs_s64));
using RngMdspanTestS64 = RngMdspanTest<int64_t>;
TEST_P(RngMdspanTestS64, Result)
{
float meanvar[2];
getExpectedMeanVar(meanvar);
ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestS64, ::testing::ValuesIn(inputs_s64));
} // namespace random
} // namespace raft
|
b1541b9bc78d4b32fc97e36681a3163e5b81efa8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <array>
#include <iostream>
#include "CudaUtils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cstdint>
int main() {
thrust::host_vector<int> src(std::vector<int> { 10, 25, 4, -2, 15, 35, 27, 99, 1 });
thrust::host_vector<int> res;
runWithProfiler([&]() {
thrust::device_vector<int> devSrc = src;
thrust::device_vector<uint8_t> devRes(devSrc.size());
thrust::transform(devSrc.begin(), devSrc.end(), devRes.begin(), [] __device__ (auto v) { return __popc(v); });
res = devRes;
});
// Print the results
for (int col = 0; col < res.size(); ++col) {
std::cout << res[col] << std::endl;
}
}
| b1541b9bc78d4b32fc97e36681a3163e5b81efa8.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <array>
#include <iostream>
#include "CudaUtils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cstdint>
// Demo: compute the population count (number of set 32-bit bits, via the
// __popc intrinsic) of each input integer on the GPU with Thrust, then
// print the results. Note -2 yields 31: all bits of 0xFFFFFFFE but the lowest.
int main() {
thrust::host_vector<int> src(std::vector<int> { 10, 25, 4, -2, 15, 35, 27, 99, 1 });
thrust::host_vector<int> res;
runWithProfiler([&]() {
// Device round trip: upload, transform element-wise, download.
thrust::device_vector<int> devSrc = src;
thrust::device_vector<uint8_t> devRes(devSrc.size()); // popcount of 32 bits fits in a byte
thrust::transform(devSrc.begin(), devSrc.end(), devRes.begin(), [] __device__ (auto v) { return __popc(v); });
res = devRes; // uint8_t widens back to int on copy
});
// Print the results
// NOTE(review): `int col` vs size_t res.size() is a signed/unsigned compare — harmless here.
for (int col = 0; col < res.size(); ++col) {
std::cout << res[col] << std::endl;
}
}
|
c4ceb9c3498151fd6f2d8c731e2d8c9d6489c729.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
__global__ void limitCoeff (int nBatch, int rbs, int rScale, float maxCoeff, float *DA, float *RA, float *CA)
{
int taskIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (taskIdx < nBatch)
{
int i = taskIdx; // % (nBatch / 2);
// support only 2 coefficients
int nCoeff = 2;
// locate arrays pointer
int daOffset = i * rbs * rScale * nCoeff;
int raOffset = i * rbs * rScale;
int caOffset = i * nCoeff; // support only 2 coefficients
// check if need to refit coefficients
if (CA[caOffset + 1] > maxCoeff || CA[caOffset + 1] < -maxCoeff) {
// set to maximum or minimum depend on sign
if (CA[caOffset + 1] > maxCoeff) {
CA[caOffset + 1] = maxCoeff;
} else if (CA[caOffset + 1] < -maxCoeff) {
CA[caOffset + 1] = -maxCoeff;
}
// refit coefficients
float suma = 0.0f; // power 1 coeff
float sumb = 0.0f; // power 0 coeff
for(int j = 0; j<rbs * rScale ;j++){
suma += DA[daOffset + rbs * rScale + j];
sumb += RA[raOffset + j];
}
CA[caOffset] = (sumb - CA[caOffset + 1] * suma) / (rbs * rScale);
}
}
} | c4ceb9c3498151fd6f2d8c731e2d8c9d6489c729.cu | #include "includes.h"
extern "C"
__global__ void limitCoeff (int nBatch, int rbs, int rScale, float maxCoeff, float *DA, float *RA, float *CA)
{
int taskIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (taskIdx < nBatch)
{
int i = taskIdx; // % (nBatch / 2);
// support only 2 coefficients
int nCoeff = 2;
// locate arrays pointer
int daOffset = i * rbs * rScale * nCoeff;
int raOffset = i * rbs * rScale;
int caOffset = i * nCoeff; // support only 2 coefficients
// check if need to refit coefficients
if (CA[caOffset + 1] > maxCoeff || CA[caOffset + 1] < -maxCoeff) {
// set to maximum or minimum depend on sign
if (CA[caOffset + 1] > maxCoeff) {
CA[caOffset + 1] = maxCoeff;
} else if (CA[caOffset + 1] < -maxCoeff) {
CA[caOffset + 1] = -maxCoeff;
}
// refit coefficients
float suma = 0.0f; // power 1 coeff
float sumb = 0.0f; // power 0 coeff
for(int j = 0; j<rbs * rScale ;j++){
suma += DA[daOffset + rbs * rScale + j];
sumb += RA[raOffset + j];
}
CA[caOffset] = (sumb - CA[caOffset + 1] * suma) / (rbs * rScale);
}
}
} |
a592751af78fda607289ebac1f6ad5e49d21a529.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
#include <time.h>
using namespace std;
//Funcin para redondear la C prima
//Utilizaremos la funcin ceil, que se encarga de redondear el argumento redondeado hacia arriba
void redondeo(float &numero0, float &numero1, double cifras){
double i, j;
i=((numero0*pow(10,cifras))-0.02)/pow(10,cifras);
j=((numero1*pow(10,cifras))-0.02)/pow(10,cifras);
numero0=ceil(i);
numero1=ceil(j);
}
//Funcion de "nucleo" que calcula la suma dando a cada hebra la
//funcion de calcular la suma de los elementos de una posicion
__global__ void vecAddKernel(float *A, float *B, float *C, int n){
int i = threadIdx.x+blockDim.x*blockIdx.x;
if(i<n)
C[i] = A[i]+B[i];
}
int leeDimension (char fichero [20]){
fstream filein1;
filein1.open(fichero, ios::in);
if(!filein1){
cerr<<"No se pudo abrir el fichero1"<<endl;
exit(1);
}
int filas=0;
char caracter;
while( filein1.get(caracter) ){
if(caracter=='\n')
filas++;
}
return filas;
}
//Funcin que suma en CUDA dos vectores
void vecAdd(float *hA, float *hB, float *hC, int n, double &ncgt){
int size = n*sizeof(float);
float * dA, *dB, *dC;
struct timespec cgt1,cgt2;
int BlockDim;
//Reservamos memoria y movemos las entradas a la memoria del device
hipMalloc((void **) &dA,size);
hipMemcpy(dA,hA,size,hipMemcpyHostToDevice);
hipMalloc((void **) &dB,size);
hipMemcpy(dB,hB,size,hipMemcpyHostToDevice);
hipMalloc((void **) &dC,size);
if(n>1024)
BlockDim=512;
else
BlockDim=ceil(n/2)+1;
dim3 dimblock(BlockDim,1,1);
dim3 dimgrid(ceil(n/BlockDim)+1 ,1,1);
//Iniciamos el nucleo para calcular la suma
clock_gettime(CLOCK_REALTIME,&cgt1);
hipLaunchKernelGGL(( vecAddKernel), dim3(dimgrid), dim3(dimblock), 0, 0, dA, dB, dC, n);
clock_gettime(CLOCK_REALTIME,&cgt2);
ncgt=(double) (cgt2.tv_sec-cgt1.tv_sec)+
(double) ((cgt2.tv_nsec-cgt1.tv_nsec)/(1.e+9));
//Copiamos el resultado del host
hipMemcpy(hC,dC,size,hipMemcpyDeviceToHost);
//Liberamos memoria
hipFree(dA);hipFree(dB);hipFree(dC);
}
int main(int argc, char **argv){
if(argc< 4){
cerr<<"Error al introducir argumentos, el orden de los vectores sera:\nVector1\nVector2\nOutput\n";
exit(1);
}
printf("********************TARJETA***************************\n" );
int numeroDevice;
hipGetDeviceCount(&numeroDevice);
for (int i = 0; i < numeroDevice; i++) {
hipDeviceProp_t propiedades;
hipGetDeviceProperties(&propiedades, i);
printf("Dispositivo Numero: %d\n", i);
printf(" Nombre del dispositivo: %s\n", propiedades.name);
printf(" Frecuencia Reloj (KHz): %d\n",
propiedades.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
propiedades.memoryBusWidth);
printf(" Ancho de Banda (GB/s): %f\n\n",
2.0*propiedades.memoryClockRate*(propiedades.memoryBusWidth/8)/1.0e6);
}
printf("********************TARJETA***************************\n" );
int n=leeDimension(argv[1]);
// cout<< endl<<" Tamanno Vectores = "<<n<<endl;
float *h_A, *h_B, *h_C, *vector_output;
h_A= new float[n];
h_B=new float[n];
h_C=new float[n];
vector_output=new float[n];
//Leemos el fichero
ifstream imput0 (argv[1], ifstream::in);
ifstream imput1 (argv[2], ifstream::in);
ifstream output (argv[3], ifstream::in);
char c[20];
//Convertimos la cadena a un float
for(int i=0; i<=n; i++){
imput0 >>c;
h_A[i]=atof(c);
imput1 >>c;
h_B[i]=atof(c);
output >>c;
vector_output[i]=atof(c);
}
// almacenamiento de n elementos h_A,h_B y h_C
double ncgt;
vecAdd(h_A, h_B, h_C, n, ncgt);
//Mostrar vectores y resultados
/*
for(int i=0; i<filas; i++)
cout<<"h_C["<<i<<"]="<<h_C[i]<<" vector_output["<<i<<"]="<<vector_output[i]<<endl;
*/
for(int i= 1;i<=n;i++)
cout<<h_A[i]<<"+"<<h_B[i]<<"="<<h_C[i]<<endl;
// Comprobacion
int linea=0;
bool no_falla=true;
for(int i=1; i<n && no_falla; i++){
redondeo(h_C[i],vector_output[i],2);
if(h_C[i]!=vector_output[i]){
no_falla=false;
linea=i;
}
}
if(no_falla)
cout<<"\nResultados correctos"<<endl;
cout.precision(8);
cout.setf(ios::fixed);
cout<<"Tamao del vector : "<<n<<"\nTiempo de ejecucin : "<<ncgt<<endl<<endl;
}
| a592751af78fda607289ebac1f6ad5e49d21a529.cu | #include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <cuda.h>
#include <time.h>
using namespace std;
//Función para redondear la C prima
//Utilizaremos la función ceil, que se encarga de redondear el argumento redondeado hacia arriba
void redondeo(float &numero0, float &numero1, double cifras){
double i, j;
i=((numero0*pow(10,cifras))-0.02)/pow(10,cifras);
j=((numero1*pow(10,cifras))-0.02)/pow(10,cifras);
numero0=ceil(i);
numero1=ceil(j);
}
//Funcion de "nucleo" que calcula la suma dando a cada hebra la
//funcion de calcular la suma de los elementos de una posicion
__global__ void vecAddKernel(float *A, float *B, float *C, int n){
int i = threadIdx.x+blockDim.x*blockIdx.x;
if(i<n)
C[i] = A[i]+B[i];
}
int leeDimension (char fichero [20]){
fstream filein1;
filein1.open(fichero, ios::in);
if(!filein1){
cerr<<"No se pudo abrir el fichero1"<<endl;
exit(1);
}
int filas=0;
char caracter;
while( filein1.get(caracter) ){
if(caracter=='\n')
filas++;
}
return filas;
}
//Función que suma en CUDA dos vectores
void vecAdd(float *hA, float *hB, float *hC, int n, double &ncgt){
int size = n*sizeof(float);
float * dA, *dB, *dC;
struct timespec cgt1,cgt2;
int BlockDim;
//Reservamos memoria y movemos las entradas a la memoria del device
cudaMalloc((void **) &dA,size);
cudaMemcpy(dA,hA,size,cudaMemcpyHostToDevice);
cudaMalloc((void **) &dB,size);
cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice);
cudaMalloc((void **) &dC,size);
if(n>1024)
BlockDim=512;
else
BlockDim=ceil(n/2)+1;
dim3 dimblock(BlockDim,1,1);
dim3 dimgrid(ceil(n/BlockDim)+1 ,1,1);
//Iniciamos el nucleo para calcular la suma
clock_gettime(CLOCK_REALTIME,&cgt1);
vecAddKernel<<< dimgrid, dimblock>>>(dA, dB, dC, n);
clock_gettime(CLOCK_REALTIME,&cgt2);
ncgt=(double) (cgt2.tv_sec-cgt1.tv_sec)+
(double) ((cgt2.tv_nsec-cgt1.tv_nsec)/(1.e+9));
//Copiamos el resultado del host
cudaMemcpy(hC,dC,size,cudaMemcpyDeviceToHost);
//Liberamos memoria
cudaFree(dA);cudaFree(dB);cudaFree(dC);
}
int main(int argc, char **argv){
if(argc< 4){
cerr<<"Error al introducir argumentos, el orden de los vectores sería:\nVector1\nVector2\nOutput\n";
exit(1);
}
printf("********************TARJETA***************************\n" );
int numeroDevice;
cudaGetDeviceCount(&numeroDevice);
for (int i = 0; i < numeroDevice; i++) {
cudaDeviceProp propiedades;
cudaGetDeviceProperties(&propiedades, i);
printf("Dispositivo Numero: %d\n", i);
printf(" Nombre del dispositivo: %s\n", propiedades.name);
printf(" Frecuencia Reloj (KHz): %d\n",
propiedades.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
propiedades.memoryBusWidth);
printf(" Ancho de Banda (GB/s): %f\n\n",
2.0*propiedades.memoryClockRate*(propiedades.memoryBusWidth/8)/1.0e6);
}
printf("********************TARJETA***************************\n" );
int n=leeDimension(argv[1]);
// cout<< endl<<" Tamanno Vectores = "<<n<<endl;
float *h_A, *h_B, *h_C, *vector_output;
h_A= new float[n];
h_B=new float[n];
h_C=new float[n];
vector_output=new float[n];
//Leemos el fichero
ifstream imput0 (argv[1], ifstream::in);
ifstream imput1 (argv[2], ifstream::in);
ifstream output (argv[3], ifstream::in);
char c[20];
//Convertimos la cadena a un float
for(int i=0; i<=n; i++){
imput0 >>c;
h_A[i]=atof(c);
imput1 >>c;
h_B[i]=atof(c);
output >>c;
vector_output[i]=atof(c);
}
// almacenamiento de n elementos h_A,h_B y h_C
double ncgt;
vecAdd(h_A, h_B, h_C, n, ncgt);
//Mostrar vectores y resultados
/*
for(int i=0; i<filas; i++)
cout<<"h_C["<<i<<"]="<<h_C[i]<<" vector_output["<<i<<"]="<<vector_output[i]<<endl;
*/
for(int i= 1;i<=n;i++)
cout<<h_A[i]<<"+"<<h_B[i]<<"="<<h_C[i]<<endl;
// Comprobacion
int linea=0;
bool no_falla=true;
for(int i=1; i<n && no_falla; i++){
redondeo(h_C[i],vector_output[i],2);
if(h_C[i]!=vector_output[i]){
no_falla=false;
linea=i;
}
}
if(no_falla)
cout<<"\nResultados correctos"<<endl;
cout.precision(8);
cout.setf(ios::fixed);
cout<<"Tamaño del vector : "<<n<<"\nTiempo de ejecución : "<<ncgt<<endl<<endl;
}
|
4abcb619e5f2d0dd98b553472cf854b67152d3c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define CUDA_CHECK(value, label) { \
hipError_t c = (value); \
if (c != hipSuccess) { \
fprintf(stderr, \
"Error: '%s' at line %d in %s\n", \
hipGetErrorString(c),__LINE__,__FILE__); \
goto label; \
} }
static __global__ void prefix_scan_device(float *in, float *out, int size) {
// Do CUDA stuff
}
void prefix_scan(float *in, float *out, int size) {
float *d_in=0, *d_out=0;
CUDA_CHECK(hipMalloc(&d_in, size * sizeof(float)), cuda_error)
CUDA_CHECK(hipMalloc(&d_out, size * sizeof(float)), cuda_error)
CUDA_CHECK(hipMemcpy(d_in, in, size * sizeof(float), hipMemcpyHostToDevice), cuda_error)
hipLaunchKernelGGL(( prefix_scan_device), dim3(128), dim3(1), 0, 0, d_in, d_out, size);
CUDA_CHECK(hipMemcpy(out, d_out, size * sizeof(float), hipMemcpyDeviceToHost), cuda_error)
cuda_error:
if(d_in) hipFree(d_in);
if(d_out) hipFree(d_out);
}
| 4abcb619e5f2d0dd98b553472cf854b67152d3c7.cu | #include <stdio.h>
#include <cuda.h>
#define CUDA_CHECK(value, label) { \
cudaError_t c = (value); \
if (c != cudaSuccess) { \
fprintf(stderr, \
"Error: '%s' at line %d in %s\n", \
cudaGetErrorString(c),__LINE__,__FILE__); \
goto label; \
} }
static __global__ void prefix_scan_device(float *in, float *out, int size) {
// Do CUDA stuff
}
void prefix_scan(float *in, float *out, int size) {
float *d_in=0, *d_out=0;
CUDA_CHECK(cudaMalloc(&d_in, size * sizeof(float)), cuda_error)
CUDA_CHECK(cudaMalloc(&d_out, size * sizeof(float)), cuda_error)
CUDA_CHECK(cudaMemcpy(d_in, in, size * sizeof(float), cudaMemcpyHostToDevice), cuda_error)
prefix_scan_device<<<128, 1>>>(d_in, d_out, size);
CUDA_CHECK(cudaMemcpy(out, d_out, size * sizeof(float), cudaMemcpyDeviceToHost), cuda_error)
cuda_error:
if(d_in) cudaFree(d_in);
if(d_out) cudaFree(d_out);
}
|
d911d7137329a7200eecc124da0a35860723198f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <numeric>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/matrix/col_wise_sort.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace matrix {
template <typename T>
std::vector<int>* sort_indexes(const std::vector<T>& v)
{
// initialize original index locations
std::vector<int>* idx = new std::vector<int>(v.size());
std::iota((*idx).begin(), (*idx).end(), 0);
// sort indexes based on comparing values in v
std::sort((*idx).begin(), (*idx).end(), [&v](int i1, int i2) { return v[i1] < v[i2]; });
return idx;
}
template <typename T>
struct columnSort {
T tolerance;
int n_row;
int n_col;
bool testKeys;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const columnSort<T>& dims)
{
return os;
}
template <typename T>
class ColumnSort : public ::testing::TestWithParam<columnSort<T>> {
protected:
ColumnSort()
: keyIn(0, resource::get_cuda_stream(handle)),
keySorted(0, resource::get_cuda_stream(handle)),
keySortGolden(0, resource::get_cuda_stream(handle)),
valueOut(0, resource::get_cuda_stream(handle)),
goldenValOut(0, resource::get_cuda_stream(handle))
{
}
void SetUp() override
{
params = ::testing::TestWithParam<columnSort<T>>::GetParam();
int len = params.n_row * params.n_col;
keyIn.resize(len, resource::get_cuda_stream(handle));
valueOut.resize(len, resource::get_cuda_stream(handle));
goldenValOut.resize(len, resource::get_cuda_stream(handle));
if (params.testKeys) {
keySorted.resize(len, resource::get_cuda_stream(handle));
keySortGolden.resize(len, resource::get_cuda_stream(handle));
}
std::vector<T> vals(len);
std::vector<int> cValGolden(len);
std::iota(vals.begin(), vals.end(),
1.0f); // will have to change input param type
std::random_shuffle(vals.begin(), vals.end());
std::vector<T> cKeyGolden(len);
for (int i = 0; i < params.n_row; i++) {
std::vector<T> tmp(vals.begin() + i * params.n_col, vals.begin() + (i + 1) * params.n_col);
auto cpuOut = sort_indexes(tmp);
std::copy((*cpuOut).begin(), (*cpuOut).end(), cValGolden.begin() + i * params.n_col);
delete cpuOut;
if (params.testKeys) {
std::sort(tmp.begin(), tmp.end());
std::copy(tmp.begin(), tmp.end(), cKeyGolden.begin() + i * params.n_col);
}
}
raft::update_device(keyIn.data(), &vals[0], len, resource::get_cuda_stream(handle));
raft::update_device(
goldenValOut.data(), &cValGolden[0], len, resource::get_cuda_stream(handle));
if (params.testKeys)
raft::update_device(
keySortGolden.data(), &cKeyGolden[0], len, resource::get_cuda_stream(handle));
auto key_in_view = raft::make_device_matrix_view<const T, int, row_major>(
keyIn.data(), params.n_row, params.n_col);
auto value_out_view = raft::make_device_matrix_view<int, int, row_major>(
valueOut.data(), params.n_row, params.n_col);
auto key_sorted_view = raft::make_device_matrix_view<T, int, row_major>(
keySorted.data(), params.n_row, params.n_col);
raft::matrix::sort_cols_per_row(
handle, key_in_view, value_out_view, std::make_optional(key_sorted_view));
RAFT_CUDA_TRY(hipStreamSynchronize(resource::get_cuda_stream(handle)));
}
protected:
raft::resources handle;
columnSort<T> params;
rmm::device_uvector<T> keyIn, keySorted, keySortGolden;
rmm::device_uvector<int> valueOut, goldenValOut; // valueOut are indexes
};
const std::vector<columnSort<float>> inputsf1 = {{0.000001f, 503, 2000, false},
{0.000001f, 113, 20000, true},
{0.000001f, 503, 2000, false},
{0.000001f, 113, 20000, true}};
typedef ColumnSort<float> ColumnSortF;
TEST_P(ColumnSortF, Result)
{
// Remove this condition once the implementation of of descending sort is
// fixed.
ASSERT_TRUE(devArrMatch(valueOut.data(),
goldenValOut.data(),
params.n_row * params.n_col,
raft::CompareApprox<float>(params.tolerance)));
if (params.testKeys) {
ASSERT_TRUE(devArrMatch(keySorted.data(),
keySortGolden.data(),
params.n_row * params.n_col,
raft::CompareApprox<float>(params.tolerance)));
}
}
INSTANTIATE_TEST_CASE_P(ColumnSortTests, ColumnSortF, ::testing::ValuesIn(inputsf1));
} // end namespace matrix
} // end namespace raft
| d911d7137329a7200eecc124da0a35860723198f.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <numeric>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/matrix/col_wise_sort.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace matrix {
template <typename T>
std::vector<int>* sort_indexes(const std::vector<T>& v)
{
// initialize original index locations
std::vector<int>* idx = new std::vector<int>(v.size());
std::iota((*idx).begin(), (*idx).end(), 0);
// sort indexes based on comparing values in v
std::sort((*idx).begin(), (*idx).end(), [&v](int i1, int i2) { return v[i1] < v[i2]; });
return idx;
}
template <typename T>
struct columnSort {
T tolerance;
int n_row;
int n_col;
bool testKeys;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const columnSort<T>& dims)
{
return os;
}
template <typename T>
class ColumnSort : public ::testing::TestWithParam<columnSort<T>> {
protected:
ColumnSort()
: keyIn(0, resource::get_cuda_stream(handle)),
keySorted(0, resource::get_cuda_stream(handle)),
keySortGolden(0, resource::get_cuda_stream(handle)),
valueOut(0, resource::get_cuda_stream(handle)),
goldenValOut(0, resource::get_cuda_stream(handle))
{
}
void SetUp() override
{
params = ::testing::TestWithParam<columnSort<T>>::GetParam();
int len = params.n_row * params.n_col;
keyIn.resize(len, resource::get_cuda_stream(handle));
valueOut.resize(len, resource::get_cuda_stream(handle));
goldenValOut.resize(len, resource::get_cuda_stream(handle));
if (params.testKeys) {
keySorted.resize(len, resource::get_cuda_stream(handle));
keySortGolden.resize(len, resource::get_cuda_stream(handle));
}
std::vector<T> vals(len);
std::vector<int> cValGolden(len);
std::iota(vals.begin(), vals.end(),
1.0f); // will have to change input param type
std::random_shuffle(vals.begin(), vals.end());
std::vector<T> cKeyGolden(len);
for (int i = 0; i < params.n_row; i++) {
std::vector<T> tmp(vals.begin() + i * params.n_col, vals.begin() + (i + 1) * params.n_col);
auto cpuOut = sort_indexes(tmp);
std::copy((*cpuOut).begin(), (*cpuOut).end(), cValGolden.begin() + i * params.n_col);
delete cpuOut;
if (params.testKeys) {
std::sort(tmp.begin(), tmp.end());
std::copy(tmp.begin(), tmp.end(), cKeyGolden.begin() + i * params.n_col);
}
}
raft::update_device(keyIn.data(), &vals[0], len, resource::get_cuda_stream(handle));
raft::update_device(
goldenValOut.data(), &cValGolden[0], len, resource::get_cuda_stream(handle));
if (params.testKeys)
raft::update_device(
keySortGolden.data(), &cKeyGolden[0], len, resource::get_cuda_stream(handle));
auto key_in_view = raft::make_device_matrix_view<const T, int, row_major>(
keyIn.data(), params.n_row, params.n_col);
auto value_out_view = raft::make_device_matrix_view<int, int, row_major>(
valueOut.data(), params.n_row, params.n_col);
auto key_sorted_view = raft::make_device_matrix_view<T, int, row_major>(
keySorted.data(), params.n_row, params.n_col);
raft::matrix::sort_cols_per_row(
handle, key_in_view, value_out_view, std::make_optional(key_sorted_view));
RAFT_CUDA_TRY(cudaStreamSynchronize(resource::get_cuda_stream(handle)));
}
protected:
raft::resources handle;
columnSort<T> params;
rmm::device_uvector<T> keyIn, keySorted, keySortGolden;
rmm::device_uvector<int> valueOut, goldenValOut; // valueOut are indexes
};
const std::vector<columnSort<float>> inputsf1 = {{0.000001f, 503, 2000, false},
{0.000001f, 113, 20000, true},
{0.000001f, 503, 2000, false},
{0.000001f, 113, 20000, true}};
typedef ColumnSort<float> ColumnSortF;
TEST_P(ColumnSortF, Result)
{
// Remove this condition once the implementation of of descending sort is
// fixed.
ASSERT_TRUE(devArrMatch(valueOut.data(),
goldenValOut.data(),
params.n_row * params.n_col,
raft::CompareApprox<float>(params.tolerance)));
if (params.testKeys) {
ASSERT_TRUE(devArrMatch(keySorted.data(),
keySortGolden.data(),
params.n_row * params.n_col,
raft::CompareApprox<float>(params.tolerance)));
}
}
INSTANTIATE_TEST_CASE_P(ColumnSortTests, ColumnSortF, ::testing::ValuesIn(inputsf1));
} // end namespace matrix
} // end namespace raft
|
dbf5c80fc2631568d0f69f248f035d1c082e42af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "ReductionAddTools.h"
#include "Grid.h"
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*---------------------*\
|* public *|
\*----------------------*/
/**
* contrainte : db puissance de 2
*/
__host__ bool isReductionAddTools_II_Ok(const Grid& grid); // __host__ facultatif
__host__ bool isReductionAddTools_II_Ok(); // __host__ facultatif
/*---------------------*\
|* private *|
\*----------------------*/
static __global__ void fillTidGlobal(int* ptrDevResultGM);
static __device__ void reductionIntraThread(int* tabSM);
/*----------------------------------------------------------------------*\
|* Host *|
\*---------------------------------------------------------------------*/
/**
* contrainte : db puissance de 2
*/
__host__ bool isReductionAddTools_II_Ok(const Grid& grid)
{
// MM pour ptrDevResultGM (oubliez pas initialisation)
// appeler kernel
// MM recuprer resultat
// cheker resultat
int* ptrRes = new int[sizeof(int)];
int* ptrResGM;
*ptrRes = 0;
Device::malloc(&ptrResGM, sizeof(int));
Device::memcpyHToD(ptrResGM, ptrRes, sizeof(int));
hipLaunchKernelGGL(( fillTidGlobal), dim3(grid.dg), dim3(grid.db), sizeof(int)*grid.db.x, 0, ptrResGM);
Device::memcpyDToH(ptrRes, ptrResGM, sizeof(int));
long n = grid.db.x * grid.dg.x;
return *ptrRes == n/2*(n-1);
}
__host__ bool isReductionAddTools_II_Ok()
{
bool isOk = true;
dim3 dg = dim3(1,1,1);
dim3 db = dim3(2, 1, 1);
Grid grid(dg, db);
for(int i = 1; i<=64; ++i)
{
dg.x = i;
grid.dg = dg;
for(int j = 2; j<=1024; j*=2)
{
db.x = j;
grid.db = db;
cout << grid << endl;
isOk &= isReductionAddTools_II_Ok(grid);
}
}
return isOk;
}
/*----------------------------------------------------------------------*\
|* Device *|
\*---------------------------------------------------------------------*/
/**
* protocole : TID_GLOBAL partout en tabSM
* resultat: (n-1)n/2 ou n=NB_THREAD_GLOBAL
*/
__global__ void fillTidGlobal(int* ptrDevResultGM)
{
extern __shared__ int tabSM[];
reductionIntraThread(tabSM);
__syncthreads();
ReductionAddTools::reductionADD(tabSM, ptrDevResultGM);
}
__device__ void reductionIntraThread(int* tabSM)
{
tabSM[threadIdx.x] = blockDim.x * blockIdx.x + threadIdx.x;
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| dbf5c80fc2631568d0f69f248f035d1c082e42af.cu | #include <iostream>
#include "ReductionAddTools.h"
#include "Grid.h"
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*---------------------*\
|* public *|
\*----------------------*/
/**
* contrainte : db puissance de 2
*/
__host__ bool isReductionAddTools_II_Ok(const Grid& grid); // __host__ facultatif
__host__ bool isReductionAddTools_II_Ok(); // __host__ facultatif
/*---------------------*\
|* private *|
\*----------------------*/
static __global__ void fillTidGlobal(int* ptrDevResultGM);
static __device__ void reductionIntraThread(int* tabSM);
/*----------------------------------------------------------------------*\
|* Host *|
\*---------------------------------------------------------------------*/
/**
* contrainte : db puissance de 2
*/
__host__ bool isReductionAddTools_II_Ok(const Grid& grid)
{
// MM pour ptrDevResultGM (oubliez pas initialisation)
// appeler kernel
// MM recuprer resultat
// cheker resultat
int* ptrRes = new int[sizeof(int)];
int* ptrResGM;
*ptrRes = 0;
Device::malloc(&ptrResGM, sizeof(int));
Device::memcpyHToD(ptrResGM, ptrRes, sizeof(int));
fillTidGlobal<<<grid.dg, grid.db, sizeof(int)*grid.db.x>>>(ptrResGM);
Device::memcpyDToH(ptrRes, ptrResGM, sizeof(int));
long n = grid.db.x * grid.dg.x;
return *ptrRes == n/2*(n-1);
}
__host__ bool isReductionAddTools_II_Ok()
{
bool isOk = true;
dim3 dg = dim3(1,1,1);
dim3 db = dim3(2, 1, 1);
Grid grid(dg, db);
for(int i = 1; i<=64; ++i)
{
dg.x = i;
grid.dg = dg;
for(int j = 2; j<=1024; j*=2)
{
db.x = j;
grid.db = db;
cout << grid << endl;
isOk &= isReductionAddTools_II_Ok(grid);
}
}
return isOk;
}
/*----------------------------------------------------------------------*\
|* Device *|
\*---------------------------------------------------------------------*/
/**
* protocole : TID_GLOBAL partout en tabSM
* resultat: (n-1)n/2 ou n=NB_THREAD_GLOBAL
*/
__global__ void fillTidGlobal(int* ptrDevResultGM)
{
extern __shared__ int tabSM[];
reductionIntraThread(tabSM);
__syncthreads();
ReductionAddTools::reductionADD(tabSM, ptrDevResultGM);
}
__device__ void reductionIntraThread(int* tabSM)
{
tabSM[threadIdx.x] = blockDim.x * blockIdx.x + threadIdx.x;
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
145f698d31946d9649a6f35f03eef90c69bd5a52.hip | // !!! This is a file automatically generated by hipify!!!
/********************************************************************************
* Monte-Carlo Simulation for Light Transport in 3D Volumes *
*********************************************************************************
* *
* Copyright (C) 2002-2008, David Boas (dboas <at> nmr.mgh.harvard.edu) *
* 2008 Jay Dubb (jdubb <at> nmr.mgh.harvard.edu) *
* 2008 Qianqian Fang (fangq <at> nmr.mgh.harvard.edu) *
* 2011 Ralf Gunter (ralfgunter <at> gmail.com) *
* *
* License: 3-clause BSD License, see LICENSE for details *
* *
********************************************************************************/
#include "main.h"
uint32_t* init_rand_seed(int seed, ExecConfig conf)
{
uint32_t *h_seed, *d_seed;
size_t sizeof_seed;
if(seed > 0)
srand(seed);
else
srand(time(NULL));
// Seed used by the RNG.
sizeof_seed = conf.n_threads * RAND_SEED_LEN * sizeof(uint32_t);
h_seed = (uint32_t *) malloc(sizeof_seed);
for(int i = 0; i < conf.n_threads * RAND_SEED_LEN; i++)
h_seed[i] = rand();
//DEV_ALLOC(d_seed, sizeof_seed);
cutilSafeCall(hipMalloc((void **) &d_seed, sizeof_seed));
TO_DEVICE(d_seed, h_seed, sizeof_seed);
free(h_seed);
return d_seed;
}
void init_params_mem(ExecConfig conf, Simulation *sim, GPUMemory *gmem)
{
uint8_t *h_linear_media_type, *d_media_type;
int4 *d_det_loc;
float4 *d_media_prop;
// Calculate the total number of voxel elements.
int grid_dim = sim->grid.dim.x * sim->grid.dim.y * sim->grid.dim.z;
// Linearize media_type, as CUDA cannot handle pointers to pointers.
h_linear_media_type = (uint8_t *) malloc(grid_dim * sizeof(uint8_t));
linearize_3d(sim->grid.media_type, h_linear_media_type,
sim->grid.dim.x, sim->grid.dim.y, sim->grid.dim.z);
// Allocate memory on the GPU global memory.
DEV_ALLOC(&d_det_loc, MAX_DETECTORS * sizeof(int4));
DEV_ALLOC(&d_media_prop, (MAX_TISSUES + 1) * sizeof(float4));
DEV_ALLOC(&d_media_type, grid_dim * sizeof(uint8_t));
// Copy simulation memory to the GPU.
//hipMemcpyToSymbol("det_loc", sim->det.info, sim->det.num * sizeof(int4));
//hipMemcpyToSymbol("media_prop", sim->tiss.prop, (sim->tiss.num + 1) * sizeof(float4));
TO_DEVICE(d_det_loc, sim->det.info, MAX_DETECTORS * sizeof(int4));
TO_DEVICE(d_media_prop, sim->tiss.prop, (MAX_TISSUES + 1) * sizeof(float4));
TO_DEVICE(d_media_type, h_linear_media_type, grid_dim * sizeof(uint8_t));
// Update GPU memory structure (so that its pointers can be used elsewhere).
gmem->det_loc = d_det_loc;
gmem->media_prop = d_media_prop;
gmem->media_type = d_media_type;
// Free temporary memory used on the host.
free(h_linear_media_type);
}
void init_results_mem(ExecConfig conf, Simulation *sim, GPUMemory *gmem)
{
float *d_fbox;
float *d_path_length, *d_mom_transfer;
float *d_temp_path_length, *d_temp_mom_transfer;
float *h_temp_tissueArrays;
int8_t *d_det_hit;
size_t num_temp_tissueArrays, num_tissueArrays, num_fbox;
// Setup the path length and momentum transfer arrays.
//num_tissueArrays = (sim->tiss.num + 1) * sim->n_photons;
num_tissueArrays = 1 << NUM_HASH_BITS;
num_temp_tissueArrays = conf.n_threads * (sim->tiss.num + 1);
sim->path_length = (float *) calloc(num_tissueArrays, sizeof(float));
sim->mom_transfer = (float *) calloc(num_tissueArrays, sizeof(float));
h_temp_tissueArrays = (float *) calloc(num_temp_tissueArrays, sizeof(float));
// Photon fluence.
num_fbox = sim->grid.nIxyz * sim->num_time_steps;
sim->fbox = (float *) calloc(num_fbox, sizeof(float));
// Array of which photons hit which detectors (if any).
sim->det.hit = (int8_t *) calloc(sim->n_photons, sizeof(int8_t));
// Allocate memory on the GPU global memory.
DEV_ALLOC(&d_path_length, num_tissueArrays * sizeof(float));
DEV_ALLOC(&d_mom_transfer, num_tissueArrays * sizeof(float));
DEV_ALLOC(&d_fbox, num_fbox * sizeof(float));
DEV_ALLOC(&d_det_hit, sim->n_photons * sizeof(int8_t));
DEV_ALLOC(&d_temp_path_length, num_temp_tissueArrays * sizeof(float));
DEV_ALLOC(&d_temp_mom_transfer, num_temp_tissueArrays * sizeof(float));
// Copy simulation memory to the GPU.
TO_DEVICE(d_path_length, sim->path_length, num_tissueArrays * sizeof(float));
TO_DEVICE(d_mom_transfer, sim->mom_transfer, num_tissueArrays * sizeof(float));
TO_DEVICE(d_fbox, sim->fbox, num_fbox * sizeof(float));
TO_DEVICE(d_det_hit, sim->det.hit, sim->n_photons * sizeof(int8_t));
TO_DEVICE(d_temp_path_length, h_temp_tissueArrays, num_temp_tissueArrays * sizeof(float));
TO_DEVICE(d_temp_mom_transfer, h_temp_tissueArrays, num_temp_tissueArrays * sizeof(float));
// Update GPU memory structure (so that its pointers can be used elsewhere).
gmem->path_length = d_path_length;
gmem->mom_transfer = d_mom_transfer;
gmem->fbox = d_fbox;
gmem->det_hit = d_det_hit;
gmem->temp_path_length = d_temp_path_length;
gmem->temp_mom_transfer = d_temp_mom_transfer;
// Free temporary memory used on the host.
free(h_temp_tissueArrays);
}
void copy_mem_symbols(Simulation *sim, GPUMemory *gmem)
{
cutilSafeCall(hipMemcpyToSymbol("s", sim, sizeof(Simulation)));
cutilSafeCall(hipMemcpyToSymbol("g", gmem, sizeof(GPUMemory)));
}
void init_mem(ExecConfig conf, Simulation *sim, GPUMemory *gmem)
{
init_params_mem(conf, sim, gmem);
init_results_mem(conf, sim, gmem);
copy_mem_symbols(sim, gmem);
}
void free_gpu_results_mem_except_fluence(GPUMemory gmem)
{
// Path length and momentum transfer.
cutilSafeCall(hipFree(gmem.path_length));
cutilSafeCall(hipFree(gmem.mom_transfer));
cutilSafeCall(hipFree(gmem.temp_path_length));
cutilSafeCall(hipFree(gmem.temp_mom_transfer));
cutilSafeCall(hipFree(gmem.det_hit));
}
void free_gpu_results_mem(GPUMemory gmem)
{
// Path length and momentum transfer.
cutilSafeCall(hipFree(gmem.path_length));
cutilSafeCall(hipFree(gmem.mom_transfer));
cutilSafeCall(hipFree(gmem.temp_path_length));
cutilSafeCall(hipFree(gmem.temp_mom_transfer));
// Photon fluence.
cutilSafeCall(hipFree(gmem.fbox));
cutilSafeCall(hipFree(gmem.det_hit));
}
void free_gpu_params_mem(GPUMemory gmem)
{
// Tissue types.
cutilSafeCall(hipFree(gmem.media_type));
// Detectors' locations and radii.
cutilSafeCall(hipFree(gmem.det_loc));
// Optical properties of the different tissue types.
cutilSafeCall(hipFree(gmem.media_prop));
}
void free_cpu_results_mem(Simulation sim)
{
// Path length and momentum transfer.
free(sim.path_length);
free(sim.mom_transfer);
// Photon fluence.
free(sim.fbox);
free(sim.det.hit);
}
// Release the host-side simulation parameters.
void free_cpu_params_mem(Simulation sim)
{
    // Tissue-type grid: free the innermost z-columns first, then each
    // row-pointer array, then the top-level pointer array.
    for (int x = 0; x < sim.grid.dim.x; x++) {
        for (int y = 0; y < sim.grid.dim.y; y++)
            free(sim.grid.media_type[x][y]);
        free(sim.grid.media_type[x]);
    }
    free(sim.grid.media_type);
    // Detectors' locations and radii.
    free(sim.det.info);
    // Optical properties of the different tissue types.
    free(sim.tiss.prop);
}
// Tear down every allocation made for a simulation run, device and host.
void free_mem(Simulation sim, GPUMemory gmem)
{
    // Device-side buffers.
    free_gpu_params_mem(gmem);
    free_gpu_results_mem(gmem);
    // Host-side buffers.
    free_cpu_params_mem(sim);
    free_cpu_results_mem(sim);
}
// Copy simulation results from GPU global memory back into the host-side
// mirrors inside `sim`.  Uses blocking copies (TO_HOST), so results are
// valid on return.
void retrieve(Simulation *sim, GPUMemory *gmem)
{
    //size_t sizeof_tissueArrays = sim->n_photons * (sim->tiss.num + 1) * sizeof(float);
    // Results are accumulated into a fixed-size table of 2^NUM_HASH_BITS floats.
    size_t sizeof_tissueArrays = (1 << NUM_HASH_BITS) * sizeof(float);
    // Fluence: one float per voxel per time gate.
    size_t sizeof_fbox = sim->grid.nIxyz * sim->num_time_steps * sizeof(float);
    // One detector-hit flag per launched photon.
    size_t sizeof_det_hit = sim->n_photons * sizeof(int8_t);
    TO_HOST(sim->path_length, gmem->path_length, sizeof_tissueArrays);
    TO_HOST(sim->mom_transfer, gmem->mom_transfer, sizeof_tissueArrays);
    TO_HOST(sim->fbox, gmem->fbox, sizeof_fbox);
    TO_HOST(sim->det.hit, gmem->det_hit, sizeof_det_hit);
}
| 145f698d31946d9649a6f35f03eef90c69bd5a52.cu | /********************************************************************************
* Monte-Carlo Simulation for Light Transport in 3D Volumes *
*********************************************************************************
* *
* Copyright (C) 2002-2008, David Boas (dboas <at> nmr.mgh.harvard.edu) *
* 2008 Jay Dubb (jdubb <at> nmr.mgh.harvard.edu) *
* 2008 Qianqian Fang (fangq <at> nmr.mgh.harvard.edu) *
* 2011 Ralf Gunter (ralfgunter <at> gmail.com) *
* *
* License: 3-clause BSD License, see LICENSE for details *
* *
********************************************************************************/
#include "main.h"
// Generate the per-thread RNG seed array on the host and upload it to
// the GPU.
//
// If `seed` > 0, the host PRNG is seeded deterministically (reproducible
// runs); otherwise it is seeded from the current time.  Returns a device
// pointer to conf.n_threads * RAND_SEED_LEN uint32 seeds; the caller owns
// the allocation.
uint32_t* init_rand_seed(int seed, ExecConfig conf)
{
    uint32_t *h_seed, *d_seed;
    size_t sizeof_seed;
    if(seed > 0)
        srand(seed);
    else
        srand(time(NULL));
    // Seed used by the RNG.
    sizeof_seed = conf.n_threads * RAND_SEED_LEN * sizeof(uint32_t);
    h_seed = (uint32_t *) malloc(sizeof_seed);  // NOTE(review): malloc result unchecked
    for(int i = 0; i < conf.n_threads * RAND_SEED_LEN; i++)
        h_seed[i] = rand();
    //DEV_ALLOC(d_seed, sizeof_seed);
    cutilSafeCall(cudaMalloc((void **) &d_seed, sizeof_seed));
    TO_DEVICE(d_seed, h_seed, sizeof_seed);
    free(h_seed);
    return d_seed;
}
// Allocate and upload the static simulation parameters: the voxelized
// tissue-type grid, detector locations/radii, and per-tissue optical
// properties.  On return, gmem holds the device pointers.
void init_params_mem(ExecConfig conf, Simulation *sim, GPUMemory *gmem)
{
    uint8_t *h_linear_media_type, *d_media_type;
    int4 *d_det_loc;
    float4 *d_media_prop;
    // Calculate the total number of voxel elements.
    int grid_dim = sim->grid.dim.x * sim->grid.dim.y * sim->grid.dim.z;
    // Linearize media_type, as CUDA cannot handle pointers to pointers.
    h_linear_media_type = (uint8_t *) malloc(grid_dim * sizeof(uint8_t));
    linearize_3d(sim->grid.media_type, h_linear_media_type,
                 sim->grid.dim.x, sim->grid.dim.y, sim->grid.dim.z);
    // Allocate memory on the GPU global memory.
    DEV_ALLOC(&d_det_loc, MAX_DETECTORS * sizeof(int4));
    DEV_ALLOC(&d_media_prop, (MAX_TISSUES + 1) * sizeof(float4));
    DEV_ALLOC(&d_media_type, grid_dim * sizeof(uint8_t));
    // Copy simulation memory to the GPU.
    //cudaMemcpyToSymbol("det_loc", sim->det.info, sim->det.num * sizeof(int4));
    //cudaMemcpyToSymbol("media_prop", sim->tiss.prop, (sim->tiss.num + 1) * sizeof(float4));
    // NOTE(review): these copies read MAX_DETECTORS / MAX_TISSUES+1 entries
    // from the host arrays; confirm sim->det.info and sim->tiss.prop are
    // allocated at those maximum sizes (the commented-out variants copied
    // only det.num / tiss.num+1 entries).
    TO_DEVICE(d_det_loc, sim->det.info, MAX_DETECTORS * sizeof(int4));
    TO_DEVICE(d_media_prop, sim->tiss.prop, (MAX_TISSUES + 1) * sizeof(float4));
    TO_DEVICE(d_media_type, h_linear_media_type, grid_dim * sizeof(uint8_t));
    // Update GPU memory structure (so that its pointers can be used elsewhere).
    gmem->det_loc = d_det_loc;
    gmem->media_prop = d_media_prop;
    gmem->media_type = d_media_type;
    // Free temporary memory used on the host.
    free(h_linear_media_type);
}
// Allocate (zero-initialized) host result buffers and their device
// counterparts, upload the zeroed contents, and record the device
// pointers in gmem.  Result buffers: hashed path-length/momentum-transfer
// tables, per-thread scratch versions of the same, the photon fluence
// grid, and a per-photon detector-hit flag array.
void init_results_mem(ExecConfig conf, Simulation *sim, GPUMemory *gmem)
{
    float *d_fbox;
    float *d_path_length, *d_mom_transfer;
    float *d_temp_path_length, *d_temp_mom_transfer;
    float *h_temp_tissueArrays;
    int8_t *d_det_hit;
    size_t num_temp_tissueArrays, num_tissueArrays, num_fbox;
    // Setup the path length and momentum transfer arrays.
    //num_tissueArrays = (sim->tiss.num + 1) * sim->n_photons;
    // Final results live in a fixed-size hash table of 2^NUM_HASH_BITS slots.
    num_tissueArrays = 1 << NUM_HASH_BITS;
    // Scratch arrays: one slot per thread per tissue type (incl. type 0).
    num_temp_tissueArrays = conf.n_threads * (sim->tiss.num + 1);
    sim->path_length = (float *) calloc(num_tissueArrays, sizeof(float));
    sim->mom_transfer = (float *) calloc(num_tissueArrays, sizeof(float));
    h_temp_tissueArrays = (float *) calloc(num_temp_tissueArrays, sizeof(float));
    // Photon fluence.
    num_fbox = sim->grid.nIxyz * sim->num_time_steps;
    sim->fbox = (float *) calloc(num_fbox, sizeof(float));
    // Array of which photons hit which detectors (if any).
    sim->det.hit = (int8_t *) calloc(sim->n_photons, sizeof(int8_t));
    // Allocate memory on the GPU global memory.
    DEV_ALLOC(&d_path_length, num_tissueArrays * sizeof(float));
    DEV_ALLOC(&d_mom_transfer, num_tissueArrays * sizeof(float));
    DEV_ALLOC(&d_fbox, num_fbox * sizeof(float));
    DEV_ALLOC(&d_det_hit, sim->n_photons * sizeof(int8_t));
    DEV_ALLOC(&d_temp_path_length, num_temp_tissueArrays * sizeof(float));
    DEV_ALLOC(&d_temp_mom_transfer, num_temp_tissueArrays * sizeof(float));
    // Copy simulation memory to the GPU (uploads the zeroed buffers).
    TO_DEVICE(d_path_length, sim->path_length, num_tissueArrays * sizeof(float));
    TO_DEVICE(d_mom_transfer, sim->mom_transfer, num_tissueArrays * sizeof(float));
    TO_DEVICE(d_fbox, sim->fbox, num_fbox * sizeof(float));
    TO_DEVICE(d_det_hit, sim->det.hit, sim->n_photons * sizeof(int8_t));
    TO_DEVICE(d_temp_path_length, h_temp_tissueArrays, num_temp_tissueArrays * sizeof(float));
    TO_DEVICE(d_temp_mom_transfer, h_temp_tissueArrays, num_temp_tissueArrays * sizeof(float));
    // Update GPU memory structure (so that its pointers can be used elsewhere).
    gmem->path_length = d_path_length;
    gmem->mom_transfer = d_mom_transfer;
    gmem->fbox = d_fbox;
    gmem->det_hit = d_det_hit;
    gmem->temp_path_length = d_temp_path_length;
    gmem->temp_mom_transfer = d_temp_mom_transfer;
    // Free temporary memory used on the host.
    free(h_temp_tissueArrays);
}
// Publish the Simulation and GPUMemory descriptors to the device-side
// globals "s" and "g" so kernels can access them without extra arguments.
// NOTE(review): string-named symbols were removed from cudaMemcpyToSymbol
// in CUDA 5.0; modern toolkits require passing the symbol itself
// (cudaMemcpyToSymbol(s, ...)) -- confirm against the toolkit in use.
void copy_mem_symbols(Simulation *sim, GPUMemory *gmem)
{
    cutilSafeCall(cudaMemcpyToSymbol("s", sim, sizeof(Simulation)));
    cutilSafeCall(cudaMemcpyToSymbol("g", gmem, sizeof(GPUMemory)));
}
// Allocate and initialize all GPU memory: parameters, then result
// buffers, then publish the filled-in descriptors to the device symbols.
// copy_mem_symbols must run last so the device sees the final pointers.
void init_mem(ExecConfig conf, Simulation *sim, GPUMemory *gmem)
{
    init_params_mem(conf, sim, gmem);
    init_results_mem(conf, sim, gmem);
    copy_mem_symbols(sim, gmem);
}
// Release the per-photon GPU result buffers while keeping the photon
// fluence array (gmem.fbox) alive on the device for further accumulation.
void free_gpu_results_mem_except_fluence(GPUMemory gmem)
{
    // Detector-hit flags.
    cutilSafeCall(cudaFree(gmem.det_hit));
    // Path-length and momentum-transfer accumulators (final + scratch).
    cutilSafeCall(cudaFree(gmem.path_length));
    cutilSafeCall(cudaFree(gmem.temp_path_length));
    cutilSafeCall(cudaFree(gmem.mom_transfer));
    cutilSafeCall(cudaFree(gmem.temp_mom_transfer));
}
// Release every GPU-side result buffer, including the photon fluence.
void free_gpu_results_mem(GPUMemory gmem)
{
    // Photon fluence grid.
    cutilSafeCall(cudaFree(gmem.fbox));
    // Detector-hit flags.
    cutilSafeCall(cudaFree(gmem.det_hit));
    // Path-length and momentum-transfer accumulators (final + scratch).
    cutilSafeCall(cudaFree(gmem.path_length));
    cutilSafeCall(cudaFree(gmem.temp_path_length));
    cutilSafeCall(cudaFree(gmem.mom_transfer));
    cutilSafeCall(cudaFree(gmem.temp_mom_transfer));
}
// Release the GPU-side simulation parameters (voxel grid, detectors,
// tissue optical properties).
void free_gpu_params_mem(GPUMemory gmem)
{
    // Optical properties of the different tissue types.
    cutilSafeCall(cudaFree(gmem.media_prop));
    // Detectors' locations and radii.
    cutilSafeCall(cudaFree(gmem.det_loc));
    // Voxel tissue-type indices.
    cutilSafeCall(cudaFree(gmem.media_type));
}
// Release the host-side result buffers mirrored from the GPU.
void free_cpu_results_mem(Simulation sim)
{
    // Detector-hit flags.
    free(sim.det.hit);
    // Photon fluence grid.
    free(sim.fbox);
    // Path-length and momentum-transfer accumulators.
    free(sim.path_length);
    free(sim.mom_transfer);
}
// Release the host-side simulation parameters.
void free_cpu_params_mem(Simulation sim)
{
    // Tissue-type grid: free the innermost z-columns first, then each
    // row-pointer array, then the top-level pointer array.
    for (int x = 0; x < sim.grid.dim.x; x++) {
        for (int y = 0; y < sim.grid.dim.y; y++)
            free(sim.grid.media_type[x][y]);
        free(sim.grid.media_type[x]);
    }
    free(sim.grid.media_type);
    // Detectors' locations and radii.
    free(sim.det.info);
    // Optical properties of the different tissue types.
    free(sim.tiss.prop);
}
// Tear down every allocation made for a simulation run, device and host.
void free_mem(Simulation sim, GPUMemory gmem)
{
    // Device-side buffers.
    free_gpu_params_mem(gmem);
    free_gpu_results_mem(gmem);
    // Host-side buffers.
    free_cpu_params_mem(sim);
    free_cpu_results_mem(sim);
}
// Copy simulation results from GPU global memory back into the host-side
// mirrors inside `sim`.  Uses blocking copies (TO_HOST), so results are
// valid on return.
void retrieve(Simulation *sim, GPUMemory *gmem)
{
    //size_t sizeof_tissueArrays = sim->n_photons * (sim->tiss.num + 1) * sizeof(float);
    // Results are accumulated into a fixed-size table of 2^NUM_HASH_BITS floats.
    size_t sizeof_tissueArrays = (1 << NUM_HASH_BITS) * sizeof(float);
    // Fluence: one float per voxel per time gate.
    size_t sizeof_fbox = sim->grid.nIxyz * sim->num_time_steps * sizeof(float);
    // One detector-hit flag per launched photon.
    size_t sizeof_det_hit = sim->n_photons * sizeof(int8_t);
    TO_HOST(sim->path_length, gmem->path_length, sizeof_tissueArrays);
    TO_HOST(sim->mom_transfer, gmem->mom_transfer, sizeof_tissueArrays);
    TO_HOST(sim->fbox, gmem->fbox, sizeof_fbox);
    TO_HOST(sim->det.hit, gmem->det_hit, sizeof_det_hit);
}
|
8e6d7dbd3afee25a4869256b1eb32d1aa52c0bf5.hip | // !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <assert.h>
static const int WORK_SIZE = /*256*/ 2;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
// Reverse the bit order *within each byte* of `v` via successive nibble,
// bit-pair, and single-bit swaps.  Note: the bytes themselves are not
// reordered, so this is a per-byte bit reversal, not a full 32-bit one
// (e.g. 1 -> 128).
__device__ unsigned int bitreverse1(unsigned int v) {
	unsigned int r = v;
	r = ((r & 0xf0f0f0f0) >> 4) | ((r & 0x0f0f0f0f) << 4); // swap nibbles
	r = ((r & 0xcccccccc) >> 2) | ((r & 0x33333333) << 2); // swap bit pairs
	r = ((r & 0xaaaaaaaa) >> 1) | ((r & 0x55555555) << 1); // swap adjacent bits
	return r;
}
/**
* CUDA kernel function that reverses the order of bits in each element of the array.
*/
// In-place per-byte bit reversal of an array of unsigned ints: one thread
// per element.  Assumes a single block whose thread count equals the
// array length -- there is no bounds guard.
__global__ void bitreverse(void *data) {
	unsigned int *idata = (unsigned int*) data;
	idata[threadIdx.x] = bitreverse1(idata[threadIdx.x]);
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
// Driver: fills idata with 1..WORK_SIZE, runs the bitreverse kernel via
// the ESBMC verification harness, and checks the output.
// NOTE(review): the assert holds only for i==0 (idata[0]==1 -> 128);
// for i==1, idata[1]==2 and the assertion fails.  This appears to be an
// ESBMC model-checking benchmark, so the failing assertion is presumably
// intentional -- confirm before "fixing".
int main() {
	void *d = NULL;
	int i;
	unsigned int idata[WORK_SIZE], odata[WORK_SIZE];
	for (i = 0; i < WORK_SIZE; i++){
		idata[i] = (unsigned int) i+1;
		printf("%u; ", idata[i]);
	}
	printf("\n");
	hipMalloc((void**) &d, sizeof(int) * WORK_SIZE);
	hipMemcpy(d, idata, sizeof(int) * WORK_SIZE, hipMemcpyHostToDevice);
	// bitreverse<<<1, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
	ESBMC_verify_kernel(bitreverse, 1, WORK_SIZE /* *sizeof(int)*/, d);
	hipDeviceSynchronize(); // Wait for the GPU launched work to complete
	hipGetLastError();
	hipMemcpy(odata, d, sizeof(int) * WORK_SIZE, hipMemcpyDeviceToHost);
	for (i = 0; i < WORK_SIZE; i++){
		printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
		assert((idata[i]==1)and(odata[i]==128));
	}
	hipFree((void*) d);
	hipDeviceReset();
	return 0;
}
| 8e6d7dbd3afee25a4869256b1eb32d1aa52c0bf5.cu | #include <call_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <assert.h>
static const int WORK_SIZE = /*256*/ 2;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
// Reverse the bit order *within each byte* of `number` (nibble, bit-pair,
// then single-bit swaps).  Bytes themselves are not reordered, so this is
// a per-byte bit reversal, not a full 32-bit one (e.g. 1 -> 128).
__device__ unsigned int bitreverse1(unsigned int number) {
	number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4);
	number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2);
	number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1);
	return number;
}
/**
* CUDA kernel function that reverses the order of bits in each element of the array.
*/
// In-place per-byte bit reversal of an array of unsigned ints: one thread
// per element.  Assumes a single block whose thread count equals the
// array length -- there is no bounds guard.
__global__ void bitreverse(void *data) {
	unsigned int *idata = (unsigned int*) data;
	idata[threadIdx.x] = bitreverse1(idata[threadIdx.x]);
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
// Driver: fills idata with 1..WORK_SIZE, runs the bitreverse kernel via
// the ESBMC verification harness, and checks the output.
// NOTE(review): the assert holds only for i==0 (idata[0]==1 -> 128);
// for i==1, idata[1]==2 and the assertion fails.  This appears to be an
// ESBMC model-checking benchmark, so the failing assertion is presumably
// intentional -- confirm before "fixing".
int main() {
	void *d = NULL;
	int i;
	unsigned int idata[WORK_SIZE], odata[WORK_SIZE];
	for (i = 0; i < WORK_SIZE; i++){
		idata[i] = (unsigned int) i+1;
		printf("%u; ", idata[i]);
	}
	printf("\n");
	cudaMalloc((void**) &d, sizeof(int) * WORK_SIZE);
	cudaMemcpy(d, idata, sizeof(int) * WORK_SIZE, cudaMemcpyHostToDevice);
	// bitreverse<<<1, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
	ESBMC_verify_kernel(bitreverse, 1, WORK_SIZE /* *sizeof(int)*/, d);
	cudaThreadSynchronize(); // Wait for the GPU launched work to complete
	cudaGetLastError();
	cudaMemcpy(odata, d, sizeof(int) * WORK_SIZE, cudaMemcpyDeviceToHost);
	for (i = 0; i < WORK_SIZE; i++){
		printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
		assert((idata[i]==1)and(odata[i]==128));
	}
	cudaFree((void*) d);
	cudaDeviceReset();
	return 0;
}
|
177d2942717f48e567fa63515ca33bdfe69b7f31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <vector>
// Histogram kernel: one thread per key, counts occurrences of each value
// into bucket[].  Assumes a single block of exactly n threads (no bounds
// guard) and that bucket[] is zeroed beforehand.
__global__ void fillBucket(int* key, int *bucket) {
  int i = threadIdx.x;
  atomicAdd(&bucket[key[i]], 1);
}
// Scatter phase of the bucket sort: thread i computes the exclusive
// prefix sum of bucket[0..i-1] with a warp shuffle scan, then writes
// bucket[i] copies of value i into key[] starting at that offset.
// Requires a single block of blockDim.x <= 32 threads (one warp), one
// thread per bucket.  Destroys bucket[] (counts down to zero).
__global__ void fillKey(int *key, int *bucket) {
  int i = threadIdx.x;
  // BUGFIX: the original passed mask 0xffffffff to __shfl_up_sync while
  // launching fewer than 32 threads -- naming non-existent lanes in the
  // mask is undefined behavior.  Build the mask from the launched width.
  unsigned mask = (blockDim.x >= 32) ? 0xffffffffu
                                     : ((1u << blockDim.x) - 1u);
  int count = bucket[i];
  // Inclusive Hillis-Steele scan across the warp (was hard-coded to a
  // width of 8; generalized to any blockDim.x up to a full warp).
  int j = count;
  for (int k = 1; k < blockDim.x; k <<= 1) {
    int up = __shfl_up_sync(mask, j, k);
    if (i >= k) j += up;
  }
  // Convert inclusive -> exclusive prefix: start offset of value i.
  j -= count;
  for (; bucket[i] > 0; bucket[i]--)
    key[j++] = i;
}
// Bucket-sort driver: generates n random keys in [0, range), counts them
// with fillBucket, then rewrites key[] in sorted order with fillKey.
// Uses managed memory so host and device share the arrays; the two
// kernels run back-to-back on the default stream (implicitly ordered),
// and hipDeviceSynchronize() makes the result visible before printing.
int main() {
  int n = 50;
  int range = 5;
  int *key, *bucket;
  hipMallocManaged(&key, n*sizeof(int));
  hipMallocManaged(&bucket, range*sizeof(int));
  for (int i=0; i<n; i++) {
    key[i] = rand() % range;   // unseeded rand(): deterministic sequence
    printf("%d ",key[i]);
  }
  printf("\n");
  for (int i=0; i<range; i++)
    bucket[i] = 0;
  hipLaunchKernelGGL(( fillBucket), dim3(1),dim3(n), 0, 0, key, bucket);
  hipLaunchKernelGGL(( fillKey), dim3(1),dim3(range), 0, 0, key, bucket);
  hipDeviceSynchronize();
  for (int i=0; i<n; i++)
    printf("%d ",key[i]);
  printf("\n");
}
| 177d2942717f48e567fa63515ca33bdfe69b7f31.cu | #include <cstdio>
#include <cstdlib>
#include <vector>
// Histogram kernel: one thread per key, counts occurrences of each value
// into bucket[].  Assumes a single block of exactly n threads (no bounds
// guard) and that bucket[] is zeroed beforehand.
__global__ void fillBucket(int* key, int *bucket) {
  int i = threadIdx.x;
  atomicAdd(&bucket[key[i]], 1);
}
// Scatter phase of the bucket sort: thread i computes the exclusive
// prefix sum of bucket[0..i-1] with a warp shuffle scan, then writes
// bucket[i] copies of value i into key[] starting at that offset.
// Requires a single block of blockDim.x <= 32 threads (one warp), one
// thread per bucket.  Destroys bucket[] (counts down to zero).
__global__ void fillKey(int *key, int *bucket) {
  int i = threadIdx.x;
  // BUGFIX: the original passed mask 0xffffffff to __shfl_up_sync while
  // launching fewer than 32 threads -- naming non-existent lanes in the
  // mask is undefined behavior.  Build the mask from the launched width.
  unsigned mask = (blockDim.x >= 32) ? 0xffffffffu
                                     : ((1u << blockDim.x) - 1u);
  int count = bucket[i];
  // Inclusive Hillis-Steele scan across the warp (was hard-coded to a
  // width of 8; generalized to any blockDim.x up to a full warp).
  int j = count;
  for (int k = 1; k < blockDim.x; k <<= 1) {
    int up = __shfl_up_sync(mask, j, k);
    if (i >= k) j += up;
  }
  // Convert inclusive -> exclusive prefix: start offset of value i.
  j -= count;
  for (; bucket[i] > 0; bucket[i]--)
    key[j++] = i;
}
// Bucket-sort driver: generates n random keys in [0, range), counts them
// with fillBucket, then rewrites key[] in sorted order with fillKey.
// Uses managed memory so host and device share the arrays; the two
// kernels run back-to-back on the default stream (implicitly ordered),
// and cudaDeviceSynchronize() makes the result visible before printing.
int main() {
  int n = 50;
  int range = 5;
  int *key, *bucket;
  cudaMallocManaged(&key, n*sizeof(int));
  cudaMallocManaged(&bucket, range*sizeof(int));
  for (int i=0; i<n; i++) {
    key[i] = rand() % range;   // unseeded rand(): deterministic sequence
    printf("%d ",key[i]);
  }
  printf("\n");
  for (int i=0; i<range; i++)
    bucket[i] = 0;
  fillBucket<<<1,n>>>(key, bucket);
  fillKey<<<1,range>>>(key, bucket);
  cudaDeviceSynchronize();
  for (int i=0; i<n; i++)
    printf("%d ",key[i]);
  printf("\n");
}
|
c9584ccbc1af99bbcaaa73f4c306f7ca6da53643.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "compress.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Micro-benchmark driver for the `compress` kernel: sweeps matrix sizes
// and launch configurations, timing 1000 launches of each combination.
// Usage: prog <num_matrix_sizes>  (number of rows of matrices_ to test).
// NOTE(review): the timed loop does not synchronize before `end`, so it
// measures launch overhead rather than kernel execution -- presumably
// intentional for this generated benchmark; confirm before relying on it.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE * YSIZE;
            // BUGFIX: the original allocated XSIZE*YSIZE *bytes* for float
            // buffers of XSIZE*YSIZE *elements* (4x too small -> OOB).
            float *mat = NULL;
            hipMalloc(&mat, n * sizeof(float));
            float *comp = NULL;
            hipMalloc(&comp, n * sizeof(float));
            int k = 1;
            // Round the matrix extents up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force lazy context initialization before timing
            // Cold launch + sync, then 10 warmup launches.
            hipLaunchKernelGGL(( compress), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,n,comp,k);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( compress), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,n,comp,k);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( compress), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,n,comp,k);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUGFIX: the original leaked mat/comp on every iteration.
            hipFree(mat);
            hipFree(comp);
        }
}} | c9584ccbc1af99bbcaaa73f4c306f7ca6da53643.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "compress.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Micro-benchmark driver for the `compress` kernel: sweeps matrix sizes
// and launch configurations, timing 1000 launches of each combination.
// Usage: prog <num_matrix_sizes>  (number of rows of matrices_ to test).
// NOTE(review): the timed loop does not synchronize before `end`, so it
// measures launch overhead rather than kernel execution -- presumably
// intentional for this generated benchmark; confirm before relying on it.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE * YSIZE;
            // BUGFIX: the original allocated XSIZE*YSIZE *bytes* for float
            // buffers of XSIZE*YSIZE *elements* (4x too small -> OOB).
            float *mat = NULL;
            cudaMalloc(&mat, n * sizeof(float));
            float *comp = NULL;
            cudaMalloc(&comp, n * sizeof(float));
            int k = 1;
            // Round the matrix extents up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context initialization before timing
            // Cold launch + sync, then 10 warmup launches.
            compress<<<gridBlock,threadBlock>>>(mat,n,comp,k);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                compress<<<gridBlock,threadBlock>>>(mat,n,comp,k);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                compress<<<gridBlock,threadBlock>>>(mat,n,comp,k);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUGFIX: the original leaked mat/comp on every iteration.
            cudaFree(mat);
            cudaFree(comp);
        }
}} |
7b31bfa3a7626bbafffa77e7c6850ca8ce92b2c7.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////
// GPU version of Monte Carlo algorithm using NVIDIA's CURAND library
////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// CUDA global constants
////////////////////////////////////////////////////////////////////////
__constant__ int N;
__constant__ float T, r, sigma, rho, alpha, dt, con1, con2;
////////////////////////////////////////////////////////////////////////
// kernel routine
////////////////////////////////////////////////////////////////////////
// Monte Carlo path calculation for a two-asset digital payoff.
//
// Each thread simulates one pair of correlated asset paths driven by the
// pre-generated standard normals in d_z and writes the discounted payoff
// to d_v[global thread id].  "Version 2" layout: thread t consumes the
// 2*N consecutive normals starting at d_z[2*N*t] (per-thread contiguous,
// uncoalesced); "version 1" (commented) interleaves them per block.
// Model parameters (N, T, r, rho, alpha, con1, con2) are __constant__.
__global__ void pathcalc(float *d_z, float *d_v)
{
    float s1, s2, y1, y2, payoff;
    int ind;
    // move array pointers to correct position
    // version 1: ind = threadIdx.x + 2*N*blockIdx.x*blockDim.x;
    // version 2:
    ind = 2*N*threadIdx.x + 2*N*blockIdx.x*blockDim.x;
    // Path calculation: Euler steps s *= (con1 + con2*y).
    s1 = 1.0f;
    s2 = 1.0f;
    for (int n=0; n<N; n++) {
        y1 = d_z[ind];
        ind += 1;   // version 1 would use: ind += blockDim.x;
        // Correlate the second driver with the first (alpha = sqrt(1-rho^2)).
        y2 = rho*y1 + alpha*d_z[ind];
        ind += 1;   // version 1 would use: ind += blockDim.x;
        s1 = s1*(con1 + con2*y1);
        s2 = s2*(con1 + con2*y2);
    }
    // Digital payoff: discounted 1 if both assets end within 10% of start.
    // BUGFIX: use single-precision fabsf/expf to avoid silently promoting
    // the arithmetic to double in this float-only kernel.
    payoff = 0.0f;
    if ( fabsf(s1-1.0f)<0.1f && fabsf(s2-1.0f)<0.1f ) payoff = expf(-r*T);
    d_v[threadIdx.x + blockIdx.x*blockDim.x] = payoff;
}
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
// Host driver: generates 2*N normals per path with hipRAND, times the RNG
// and the pathcalc kernel with events, and reports the Monte Carlo mean
// and its standard error over NPATH paths.
int main(int argc, const char **argv){
    int NPATH=960000, h_N=100;
    float h_T, h_r, h_sigma, h_rho, h_alpha, h_dt, h_con1, h_con2;
    float *h_v, *d_v, *d_z;
    double sum1, sum2;
    // initialise card
    findCudaDevice(argc, argv);
    // initialise CUDA timing
    float milli;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // allocate memory on host and device
    h_v = (float *)malloc(sizeof(float)*NPATH);
    checkCudaErrors( hipMalloc((void **)&d_v, sizeof(float)*NPATH) );
    checkCudaErrors( hipMalloc((void **)&d_z, sizeof(float)*2*h_N*NPATH) );
    // define constants and transfer to GPU
    h_T = 1.0f;
    h_r = 0.05f;
    h_sigma = 0.1f;
    h_rho = 0.5f;
    h_alpha = sqrt(1.0f-h_rho*h_rho);
    h_dt = 1.0f/h_N;
    // con1/con2: per-step Euler factors 1 + r*dt and sigma*sqrt(dt).
    h_con1 = 1.0f + h_r*h_dt;
    h_con2 = sqrt(h_dt)*h_sigma;
    checkCudaErrors( hipMemcpyToSymbol(N, &h_N, sizeof(h_N)) );
    checkCudaErrors( hipMemcpyToSymbol(T, &h_T, sizeof(h_T)) );
    checkCudaErrors( hipMemcpyToSymbol(r, &h_r, sizeof(h_r)) );
    checkCudaErrors( hipMemcpyToSymbol(sigma,&h_sigma,sizeof(h_sigma)) );
    checkCudaErrors( hipMemcpyToSymbol(rho, &h_rho, sizeof(h_rho)) );
    checkCudaErrors( hipMemcpyToSymbol(alpha,&h_alpha,sizeof(h_alpha)) );
    checkCudaErrors( hipMemcpyToSymbol(dt, &h_dt, sizeof(h_dt)) );
    checkCudaErrors( hipMemcpyToSymbol(con1, &h_con1, sizeof(h_con1)) );
    checkCudaErrors( hipMemcpyToSymbol(con2, &h_con2, sizeof(h_con2)) );
    // random number generation (fixed seed for reproducibility)
    hipEventRecord(start);
    hiprandGenerator_t gen;
    checkCudaErrors( hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT) );
    checkCudaErrors( hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL) );
    checkCudaErrors( hiprandGenerateNormal(gen, d_z, 2*h_N*NPATH, 0.0f, 1.0f) );
    hipEventRecord(stop);
    hipEventSynchronize(stop); // ensure all the threads in GPU finish
    hipEventElapsedTime(&milli, start, stop);
    printf("CURAND normal RNG execution time (ms): %f, samples/sec: %e \n",
    milli, 2.0*h_N*NPATH/(0.001*milli));
    // execute kernel and time it (64 threads/block, one path per thread)
    hipEventRecord(start);
    hipLaunchKernelGGL(( pathcalc), dim3(NPATH/64), dim3(64), 0, 0, d_z, d_v);
    getLastCudaError("pathcalc execution failed\n");
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milli, start, stop);
    printf("Monte Carlo kernel execution time (ms): %f \n",milli);
    // copy back results (blocking copy: kernel has finished by return)
    checkCudaErrors( hipMemcpy(h_v, d_v, sizeof(float)*NPATH,
    hipMemcpyDeviceToHost) );
    // compute average and standard error in double precision
    sum1 = 0.0;
    sum2 = 0.0;
    for (int i=0; i<NPATH; i++) {
        sum1 += h_v[i];
        sum2 += h_v[i]*h_v[i];
    }
    printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
    sum1/NPATH, sqrt((sum2/NPATH - (sum1/NPATH)*(sum1/NPATH))/NPATH) );
    // Tidy up library
    checkCudaErrors( hiprandDestroyGenerator(gen) );
    // Release memory and exit cleanly
    free(h_v);
    checkCudaErrors( hipFree(d_v) );
    checkCudaErrors( hipFree(d_z) );
    // CUDA exit -- needed to flush printf write buffer
    hipDeviceReset();
}
| 7b31bfa3a7626bbafffa77e7c6850ca8ce92b2c7.cu |
////////////////////////////////////////////////////////////////////////
// GPU version of Monte Carlo algorithm using NVIDIA's CURAND library
////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <curand.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// CUDA global constants
////////////////////////////////////////////////////////////////////////
__constant__ int N;
__constant__ float T, r, sigma, rho, alpha, dt, con1, con2;
////////////////////////////////////////////////////////////////////////
// kernel routine
////////////////////////////////////////////////////////////////////////
// Monte Carlo path calculation for a two-asset digital payoff.
//
// Each thread simulates one pair of correlated asset paths driven by the
// pre-generated standard normals in d_z and writes the discounted payoff
// to d_v[global thread id].  "Version 2" layout: thread t consumes the
// 2*N consecutive normals starting at d_z[2*N*t] (per-thread contiguous,
// uncoalesced); "version 1" (commented) interleaves them per block.
// Model parameters (N, T, r, rho, alpha, con1, con2) are __constant__.
__global__ void pathcalc(float *d_z, float *d_v)
{
    float s1, s2, y1, y2, payoff;
    int ind;
    // move array pointers to correct position
    // version 1: ind = threadIdx.x + 2*N*blockIdx.x*blockDim.x;
    // version 2:
    ind = 2*N*threadIdx.x + 2*N*blockIdx.x*blockDim.x;
    // Path calculation: Euler steps s *= (con1 + con2*y).
    s1 = 1.0f;
    s2 = 1.0f;
    for (int n=0; n<N; n++) {
        y1 = d_z[ind];
        ind += 1;   // version 1 would use: ind += blockDim.x;
        // Correlate the second driver with the first (alpha = sqrt(1-rho^2)).
        y2 = rho*y1 + alpha*d_z[ind];
        ind += 1;   // version 1 would use: ind += blockDim.x;
        s1 = s1*(con1 + con2*y1);
        s2 = s2*(con1 + con2*y2);
    }
    // Digital payoff: discounted 1 if both assets end within 10% of start.
    // BUGFIX: use single-precision fabsf/expf to avoid silently promoting
    // the arithmetic to double in this float-only kernel.
    payoff = 0.0f;
    if ( fabsf(s1-1.0f)<0.1f && fabsf(s2-1.0f)<0.1f ) payoff = expf(-r*T);
    d_v[threadIdx.x + blockIdx.x*blockDim.x] = payoff;
}
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
// Host driver: generates 2*N normals per path with CURAND, times the RNG
// and the pathcalc kernel with events, and reports the Monte Carlo mean
// and its standard error over NPATH paths.
int main(int argc, const char **argv){
    int NPATH=960000, h_N=100;
    float h_T, h_r, h_sigma, h_rho, h_alpha, h_dt, h_con1, h_con2;
    float *h_v, *d_v, *d_z;
    double sum1, sum2;
    // initialise card
    findCudaDevice(argc, argv);
    // initialise CUDA timing
    float milli;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // allocate memory on host and device
    h_v = (float *)malloc(sizeof(float)*NPATH);
    checkCudaErrors( cudaMalloc((void **)&d_v, sizeof(float)*NPATH) );
    checkCudaErrors( cudaMalloc((void **)&d_z, sizeof(float)*2*h_N*NPATH) );
    // define constants and transfer to GPU
    h_T = 1.0f;
    h_r = 0.05f;
    h_sigma = 0.1f;
    h_rho = 0.5f;
    h_alpha = sqrt(1.0f-h_rho*h_rho);
    h_dt = 1.0f/h_N;
    // con1/con2: per-step Euler factors 1 + r*dt and sigma*sqrt(dt).
    h_con1 = 1.0f + h_r*h_dt;
    h_con2 = sqrt(h_dt)*h_sigma;
    checkCudaErrors( cudaMemcpyToSymbol(N, &h_N, sizeof(h_N)) );
    checkCudaErrors( cudaMemcpyToSymbol(T, &h_T, sizeof(h_T)) );
    checkCudaErrors( cudaMemcpyToSymbol(r, &h_r, sizeof(h_r)) );
    checkCudaErrors( cudaMemcpyToSymbol(sigma,&h_sigma,sizeof(h_sigma)) );
    checkCudaErrors( cudaMemcpyToSymbol(rho, &h_rho, sizeof(h_rho)) );
    checkCudaErrors( cudaMemcpyToSymbol(alpha,&h_alpha,sizeof(h_alpha)) );
    checkCudaErrors( cudaMemcpyToSymbol(dt, &h_dt, sizeof(h_dt)) );
    checkCudaErrors( cudaMemcpyToSymbol(con1, &h_con1, sizeof(h_con1)) );
    checkCudaErrors( cudaMemcpyToSymbol(con2, &h_con2, sizeof(h_con2)) );
    // random number generation (fixed seed for reproducibility)
    cudaEventRecord(start);
    curandGenerator_t gen;
    checkCudaErrors( curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT) );
    checkCudaErrors( curandSetPseudoRandomGeneratorSeed(gen, 1234ULL) );
    checkCudaErrors( curandGenerateNormal(gen, d_z, 2*h_N*NPATH, 0.0f, 1.0f) );
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // ensure all the threads in GPU finish
    cudaEventElapsedTime(&milli, start, stop);
    printf("CURAND normal RNG execution time (ms): %f, samples/sec: %e \n",
    milli, 2.0*h_N*NPATH/(0.001*milli));
    // execute kernel and time it (64 threads/block, one path per thread)
    cudaEventRecord(start);
    pathcalc<<<NPATH/64, 64>>>(d_z, d_v);
    getLastCudaError("pathcalc execution failed\n");
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);
    printf("Monte Carlo kernel execution time (ms): %f \n",milli);
    // copy back results (blocking copy: kernel has finished by return)
    checkCudaErrors( cudaMemcpy(h_v, d_v, sizeof(float)*NPATH,
    cudaMemcpyDeviceToHost) );
    // compute average and standard error in double precision
    sum1 = 0.0;
    sum2 = 0.0;
    for (int i=0; i<NPATH; i++) {
        sum1 += h_v[i];
        sum2 += h_v[i]*h_v[i];
    }
    printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
    sum1/NPATH, sqrt((sum2/NPATH - (sum1/NPATH)*(sum1/NPATH))/NPATH) );
    // Tidy up library
    checkCudaErrors( curandDestroyGenerator(gen) );
    // Release memory and exit cleanly
    free(h_v);
    checkCudaErrors( cudaFree(d_v) );
    checkCudaErrors( cudaFree(d_z) );
    // CUDA exit -- needed to flush printf write buffer
    cudaDeviceReset();
}
|
b2c210f84abe38f3f9172e8a6dc9e8d9cffe6865.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmgesellcmmv.cu, normal z -> s, Thu Oct 8 23:05:50 2020
*/
#include "magmasparse_internal.h"
#define PRECISION_s
//#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
// SELL-P multi-vector SpMV, 1 thread per row.  Each thread accumulates
// one row's dot product for one right-hand-side vector (threadIdx.y).
// The betazero template variant skips reading dy when beta == 0.
// Note: the T parameter (threads per row) is unused in this variant.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D(
    int num_rows,
    int num_cols,
    int num_vecs,
    int blocksize,
    int T,
    float alpha,
    float * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    float * dx,
    float beta,
    float * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.x;      // local row
    int idy = threadIdx.y;      // vector
    int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
    int row = bdx * blocksize + idx;  // global row index
    if (row < num_rows ) {
        float dot = MAGMA_S_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        // drowptr brackets each slice; slices are padded to blocksize rows.
        int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            float val =
                dval[ offset + idx + blocksize*k ];
            int col =
                dcolind[ offset + idx + blocksize*k ];
            // dx is stored with the num_vecs vectors interleaved per row.
            dot += val * dx[ col*num_vecs+idy ];
        }
        if (betazero) {
            dy[ row+idy*num_rows ] = dot*alpha;
        } else {
            dy[ row+idy*num_rows ] = dot*alpha + beta*dy [ row+idy*num_rows ];
        }
    }
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P multi-vector SpMV, T=4 threads cooperating per row.  Partial
// products are combined through dynamic shared memory (blocksize*T*
// num_vecs floats required at launch).
// NOTE(review): the __syncthreads() calls sit inside divergent branches
// (`if (row < num_rows)` and `if (idx < 2)`), which is undefined behavior
// when a block straddles num_rows or T > 2 -- this mirrors the upstream
// MAGMA code, but confirm launch shapes keep all threads convergent.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D(
    int num_rows,
    int num_cols,
    int num_vecs,
    int blocksize,
    int T,
    float alpha,
    float * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    float * dx,
    float beta,
    float * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;      // thread in row
    int idy = threadIdx.x;      // local row
    int idz = threadIdx.z;      // vector
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
    int row = bdx * blocksize + idy;  // global row index
    int vec = idz*num_rows;     // offset of this RHS vector in dx/dy
    extern __shared__ float shared[];
    if (row < num_rows ) {
        float dot = MAGMA_S_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T; // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            float val =
                dval[ offset + ldx + block*k ];
            int col =
                dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col+vec ];
        }
        shared[ldz] = dot;
        __syncthreads();
        // Tree reduction over the T=4 partial sums for each row.
        if ( idx < 2 ) {
            shared[ldz]+=shared[ldz+blocksize*2];
            __syncthreads();
            if ( idx == 0 ) {
                if (betazero) {
                    dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
                } else {
                    dy[row+vec] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha
                        + beta*dy [row+vec];
                }
            }
        }
    }
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P multi-vector SpMV, T=8 threads cooperating per row.  Partial
// products are combined through dynamic shared memory; dx is marked
// __restrict__ so reads may go through the read-only cache.
// NOTE(review): the __syncthreads() calls sit inside divergent branches
// (`if (row < num_rows)` and `if (idx < 4)`), which is undefined behavior
// when a block straddles num_rows -- mirrors the upstream MAGMA code, but
// confirm launch shapes keep all threads convergent.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D(
    int num_rows,
    int num_cols,
    int num_vecs,
    int blocksize,
    int T,
    float alpha,
    float * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    const float * __restrict__ dx,
    float beta,
    float * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;      // thread in row
    int idy = threadIdx.x;      // local row
    int idz = threadIdx.z;      // vector
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
    int row = bdx * blocksize + idy;  // global row index
    int vec = idz*num_rows;     // offset of this RHS vector in dx/dy
    extern __shared__ float shared[];
    if (row < num_rows ) {
        float dot = MAGMA_S_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T; // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            float val =
                dval[ offset + ldx + block*k ];
            int col =
                dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col+vec ];
        }
        shared[ldz] = dot;
        __syncthreads();
        // Tree reduction over the T=8 partial sums for each row.
        if ( idx < 4 ) {
            shared[ldz]+=shared[ldz+blocksize*4];
            __syncthreads();
            if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
            __syncthreads();
            if ( idx == 0 ) {
                if (betazero) {
                    dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
                } else {
                    dy[row+vec] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha
                        + beta*dy [row+vec];
                }
            }
        }
    }
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, T = 16 threads cooperating on each row, one z-layer per vector.
// Expects blockDim = (blocksize, T, num_vecs) and dynamic shared memory of
// blockDim.x*blockDim.y*blockDim.z floats.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D(
    int num_rows,              // number of matrix rows
    int num_cols,              // number of matrix columns (unused here)
    int num_vecs,              // number of right-hand-side vectors
    int blocksize,             // rows per SELL-P slice
    int T,                     // threads assigned to each row (16)
    float alpha,               // scalar multiplier for A*x
    float * dval,              // nonzero values in SELL-P layout
    magma_index_t * dcolind,   // column indices in SELL-P layout
    magma_index_t * drowptr,   // slice pointers
    float * dx,                // input vectors
    float beta,                // scalar multiplier for y
    float * dy)                // output vectors
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    int vec = idz * num_rows;                       // offset of this vector
    extern __shared__ float shared[];

    // Partial dot product; out-of-range rows contribute zero so the whole
    // shared array is initialized before reducing.
    float dot = MAGMA_S_MAKE(0.0, 0.0);
    if (row < num_rows) {
        int offset = drowptr[ bdx ];
        int block  = blocksize * T;  // total number of threads
        int max_   = (drowptr[ bdx+1 ] - offset) / block;  // elements per thread
        for (int k = 0; k < max_; k++) {
            float val = dval[ offset + ldx + block*k ];
            int col   = dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col + vec ];
        }
    }
    shared[ldz] = dot;
    __syncthreads();
    // Tree reduction of the 16 partials per row; every barrier is executed by
    // all threads of the block (the previous version ran __syncthreads()
    // inside divergent branches, which is undefined behavior).
    if (idx < 8)
        shared[ldz] += shared[ldz + blocksize*8];
    __syncthreads();
    if (idx < 4)
        shared[ldz] += shared[ldz + blocksize*4];
    __syncthreads();
    if (idx < 2)
        shared[ldz] += shared[ldz + blocksize*2];
    __syncthreads();
    if (idx == 0 && row < num_rows) {
        if (betazero) {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha;
        } else {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha
                          + beta * dy[row+vec];
        }
    }
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, T = 32 threads cooperating on each row, one z-layer per vector.
// Expects blockDim = (blocksize, T, num_vecs) and dynamic shared memory of
// blockDim.x*blockDim.y*blockDim.z floats.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D(
    int num_rows,              // number of matrix rows
    int num_cols,              // number of matrix columns (unused here)
    int num_vecs,              // number of right-hand-side vectors
    int blocksize,             // rows per SELL-P slice
    int T,                     // threads assigned to each row (32)
    float alpha,               // scalar multiplier for A*x
    float * dval,              // nonzero values in SELL-P layout
    magma_index_t * dcolind,   // column indices in SELL-P layout
    magma_index_t * drowptr,   // slice pointers
    float * dx,                // input vectors
    float beta,                // scalar multiplier for y
    float * dy)                // output vectors
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    int vec = idz * num_rows;                       // offset of this vector
    extern __shared__ float shared[];

    // Partial dot product; out-of-range rows contribute zero so the whole
    // shared array is initialized before reducing.
    float dot = MAGMA_S_MAKE(0.0, 0.0);
    if (row < num_rows) {
        int offset = drowptr[ bdx ];
        int block  = blocksize * T;  // total number of threads
        int max_   = (drowptr[ bdx+1 ] - offset) / block;  // elements per thread
        for (int k = 0; k < max_; k++) {
            float val = dval[ offset + ldx + block*k ];
            int col   = dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col + vec ];
        }
    }
    shared[ldz] = dot;
    __syncthreads();
    // Tree reduction of the 32 partials per row; every barrier is executed by
    // all threads of the block (the previous version ran __syncthreads()
    // inside divergent branches, which is undefined behavior).
    if (idx < 16)
        shared[ldz] += shared[ldz + blocksize*16];
    __syncthreads();
    if (idx < 8)
        shared[ldz] += shared[ldz + blocksize*8];
    __syncthreads();
    if (idx < 4)
        shared[ldz] += shared[ldz + blocksize*4];
    __syncthreads();
    if (idx < 2)
        shared[ldz] += shared[ldz + blocksize*2];
    __syncthreads();
    if (idx == 0 && row < num_rows) {
        if (betazero) {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha;
        } else {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha
                          + beta * dy[row+vec];
        }
    }
}
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Texture-memory variant: one thread per row, two vectors handled per thread
// via one 128-bit (int4) texture fetch.
// NOTE(review): the body compiles only when PRECISION_d and TEXTURE are both
// defined; this single-precision file defines PRECISION_s, so the kernel is
// effectively empty (dead code kept by the z->s code generator).
// NOTE(review): if ever enabled, __hiloint2float is not a CUDA intrinsic
// (the double-precision original uses __hiloint2double) -- confirm before use.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D_tex(
    int num_rows,
    int num_cols,
    int num_vecs,
    int blocksize,
    int T,
    float alpha,
    float * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    hipTextureObject_t texdx,
    float beta,
    float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
    int idx = threadIdx.x;  // local row
    int idy = threadIdx.y;  // vector (pair index: handles vectors 2*idy, 2*idy+1)
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idx;  // global row index

    if (row < num_rows ) {
        float dot1 = MAGMA_S_MAKE(0.0, 0.0);   // partial sum for vector 2*idy
        float dot2 = MAGMA_S_MAKE(0.0, 0.0);   // partial sum for vector 2*idy+1
        int offset = drowptr[ bdx ];
        int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
            // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            float val =
                dval[ offset + idx + blocksize*k ];
            int col =
                num_vecs * dcolind[ offset + idx + blocksize*k ];
            // one int4 fetch = two packed doubles (hi/lo word pairs)
            int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
            dot1 += val * __hiloint2float(v.y, v.x);
            dot2 += val * __hiloint2float(v.w, v.z);
        }
        if (betazero) {
            dy[row+num_rows*idy*2] =
                dot1*alpha;
            dy[row+num_rows*idy*2+num_rows] =
                dot2*alpha;
        } else {
            // NOTE(review): reads use row*num_vecs+... while writes use
            // row+num_rows*... -- mixed row-/column-major indexing; this
            // mirrors the documented x row-major / y column-major contract,
            // but verify against the callers before enabling.
            dy[row+num_rows*idy*2] =
                dot1*alpha
                + beta*dy [row*num_vecs+idy*2];
            dy[row+num_rows*idy*2+num_rows] =
                dot2*alpha
                + beta*dy [row*num_vecs+idy*2+1];
        }
    }
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Texture-memory variant, T = 4 threads per row, two vectors per z-layer.
// NOTE(review): dead code in this PRECISION_s file (guarded by PRECISION_d);
// __hiloint2float is not a CUDA intrinsic -- see the _1_3D_tex note.
// NOTE(review): the __syncthreads() calls below execute inside divergent
// branches (row < num_rows, idx < 2), which is undefined behavior per the
// CUDA programming guide -- verify before ever enabling this path.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D_tex(
    int num_rows,
    int num_cols,
    int num_vecs,
    int blocksize,
    int T,
    float alpha,
    float * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    hipTextureObject_t texdx,
    float beta,
    float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector pair
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;  // global row index
    int sv = num_vecs/2 * blocksize * T;  // offset of the second partial-sum bank
    extern __shared__ float shared[];

    if (row < num_rows ) {
        float dot1 = MAGMA_S_MAKE(0.0, 0.0);
        float dot2 = MAGMA_S_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            float val =
                dval[ offset + ldx + block*k ];
            int col =
                num_vecs * dcolind[ offset + ldx + block*k ];
            int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
            dot1 += val * __hiloint2float(v.y, v.x);
            dot2 += val * __hiloint2float(v.w, v.z);
        }
        shared[ldz] = dot1;
        shared[ldz+sv] = dot2;
        __syncthreads();
        // reduce the 4 partials per row for both vectors of the pair
        if ( idx < 2 ) {
            shared[ldz]+=shared[ldz+blocksize*2];
            shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
            __syncthreads();
            if ( idx == 0 ) {
                if (betazero) {
                    dy[row+num_rows*idz*2] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha;
                    dy[row+num_rows*idz*2+num_rows] =
                        (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
                } else {
                    dy[row+num_rows*idz*2] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha
                        + beta*dy [row*num_vecs+idz*2];
                    dy[row+num_rows*idz*2+num_rows] =
                        (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
                        + beta*dy [row*num_vecs+idz*2+1];
                }
            }
        }
    }
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Texture-memory variant, T = 8 threads per row, two vectors per z-layer.
// NOTE(review): dead code in this PRECISION_s file (guarded by PRECISION_d);
// __hiloint2float is not a CUDA intrinsic -- see the _1_3D_tex note.
// NOTE(review): __syncthreads() below executes inside divergent branches
// (row < num_rows, idx < 4) -- undefined behavior; verify before enabling.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D_tex(
    int num_rows,
    int num_cols,
    int num_vecs,
    int blocksize,
    int T,
    float alpha,
    float * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    hipTextureObject_t texdx,
    float beta,
    float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector pair
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;  // global row index
    int sv = num_vecs/2 * blocksize * T;  // offset of the second partial-sum bank
    extern __shared__ float shared[];

    if (row < num_rows ) {
        float dot1 = MAGMA_S_MAKE(0.0, 0.0);
        float dot2 = MAGMA_S_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            float val =
                dval[ offset + ldx + block*k ];
            int col =
                num_vecs * dcolind[ offset + ldx + block*k ];
            int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
            dot1 += val * __hiloint2float(v.y, v.x);
            dot2 += val * __hiloint2float(v.w, v.z);
        }
        shared[ldz] = dot1;
        shared[ldz+sv] = dot2;
        __syncthreads();
        // tree reduction of the 8 partials per row for both vectors
        if ( idx < 4 ) {
            shared[ldz]+=shared[ldz+blocksize*4];
            shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
            __syncthreads();
            if ( idx < 2 ) {
                shared[ldz]+=shared[ldz+blocksize*2];
                shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
            }
            __syncthreads();
            if ( idx == 0 ) {
                if (betazero) {
                    dy[row+num_rows*idz*2] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha;
                    dy[row+num_rows*idz*2+num_rows] =
                        (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
                } else {
                    dy[row+num_rows*idz*2] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha
                        + beta*dy [row*num_vecs+idz*2];
                    dy[row+num_rows*idz*2+num_rows] =
                        (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
                        + beta*dy [row*num_vecs+idz*2+1];
                }
            }
        }
    }
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Texture-memory variant, T = 16 threads per row, two vectors per z-layer.
// NOTE(review): dead code in this PRECISION_s file (guarded by PRECISION_d);
// __hiloint2float is not a CUDA intrinsic -- see the _1_3D_tex note.
// NOTE(review): __syncthreads() executes inside divergent branches, and there
// is NO barrier between the idx<4 and idx<2 reduction steps (unlike the
// non-texture kernels) -- a shared-memory race if T spans more than one warp;
// verify before ever enabling this path.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D_tex(
    int num_rows,
    int num_cols,
    int num_vecs,
    int blocksize,
    int T,
    float alpha,
    float * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    hipTextureObject_t texdx,
    float beta,
    float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector pair
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;  // global row index
    int sv = num_vecs/2 * blocksize * T;  // offset of the second partial-sum bank
    extern __shared__ float shared[];

    if (row < num_rows ) {
        float dot1 = MAGMA_S_MAKE(0.0, 0.0);
        float dot2 = MAGMA_S_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            float val =
                dval[ offset + ldx + block*k ];
            int col =
                num_vecs * dcolind[ offset + ldx + block*k ];
            int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
            dot1 += val * __hiloint2float(v.y, v.x);
            dot2 += val * __hiloint2float(v.w, v.z);
        }
        shared[ldz] = dot1;
        shared[ldz+sv] = dot2;
        __syncthreads();
        // tree reduction of the 16 partials per row for both vectors
        if ( idx < 8 ) {
            shared[ldz]+=shared[ldz+blocksize*8];
            shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
            __syncthreads();
            if ( idx < 4 ) {
                shared[ldz]+=shared[ldz+blocksize*4];
                shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
            }
            // (missing barrier here -- see the review note above)
            if ( idx < 2 ) {
                shared[ldz]+=shared[ldz+blocksize*2];
                shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
            }
            __syncthreads();
            if ( idx == 0 ) {
                if (betazero) {
                    dy[row+num_rows*idz*2] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha;
                    dy[row+num_rows*idz*2+num_rows] =
                        (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
                } else {
                    dy[row+num_rows*idz*2] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha
                        + beta*dy [row*num_vecs+idz*2];
                    dy[row+num_rows*idz*2+num_rows] =
                        (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
                        + beta*dy [row*num_vecs+idz*2+1];
                }
            }
        }
    }
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Texture-memory variant, T = 32 threads per row, two vectors per z-layer.
// NOTE(review): dead code in this PRECISION_s file (guarded by PRECISION_d);
// __hiloint2float is not a CUDA intrinsic -- see the _1_3D_tex note.
// NOTE(review): __syncthreads() executes inside divergent branches, and there
// are NO barriers between the idx<8 / idx<4 / idx<2 reduction steps -- a
// shared-memory race if T spans more than one warp; verify before enabling.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D_tex(
    int num_rows,
    int num_cols,
    int num_vecs,
    int blocksize,
    int T,
    float alpha,
    float * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    hipTextureObject_t texdx,
    float beta,
    float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector pair
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;  // global row index
    int sv = num_vecs/2 * blocksize * T;  // offset of the second partial-sum bank
    extern __shared__ float shared[];

    if (row < num_rows ) {
        float dot1 = MAGMA_S_MAKE(0.0, 0.0);
        float dot2 = MAGMA_S_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            float val =
                dval[ offset + ldx + block*k ];
            int col =
                num_vecs * dcolind[ offset + ldx + block*k ];
            int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
            dot1 += val * __hiloint2float(v.y, v.x);
            dot2 += val * __hiloint2float(v.w, v.z);
        }
        shared[ldz] = dot1;
        shared[ldz+sv] = dot2;
        __syncthreads();
        // tree reduction of the 32 partials per row for both vectors
        if ( idx < 16 ) {
            shared[ldz]+=shared[ldz+blocksize*16];
            shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
            __syncthreads();
            if ( idx < 8 ) {
                shared[ldz]+=shared[ldz+blocksize*8];
                shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
            }
            // (missing barriers between the next steps -- see note above)
            if ( idx < 4 ) {
                shared[ldz]+=shared[ldz+blocksize*4];
                shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
            }
            if ( idx < 2 ) {
                shared[ldz]+=shared[ldz+blocksize*2];
                shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
            }
            __syncthreads();
            if ( idx == 0 ) {
                if (betazero) {
                    dy[row+num_rows*idz*2] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha;
                    dy[row+num_rows*idz*2+num_rows] =
                        (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
                } else {
                    dy[row+num_rows*idz*2] =
                        (shared[ldz]+shared[ldz+blocksize*1])*alpha
                        + beta*dy [row*num_vecs+idz*2];
                    dy[row+num_rows*idz*2+num_rows] =
                        (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
                        + beta*dy [row*num_vecs+idz*2+1];
                }
            }
        }
    }
#endif
}
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
Input format is SELLP. Note, that the input format for X is row-major
while the output format for Y is column major!
Arguments
---------
@param[in]
transA magma_trans_t
transpose A?
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of columns in X and Y
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
// Host dispatcher: selects the SELL-P SpMV kernel variant (texture vs. plain,
// and the per-row thread count = alignment) and launches it on the queue's
// stream. See the doxygen block above for the parameter contract.
extern "C" magma_int_t
magma_smgesellpmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    magma_int_t blocksize,
    magma_int_t slices,
    magma_int_t alignment,
    float alpha,
    magmaFloat_ptr dval,
    magmaIndex_ptr dcolind,
    magmaIndex_ptr drowptr,
    magmaFloat_ptr dx,
    float beta,
    magmaFloat_ptr dy,
    magma_queue_t queue )
{
    // using a 3D thread grid for small num_vecs, a 2D grid otherwise
    int texture=0, kepler=0, precision=0;

    magma_int_t arch = magma_getdevice_arch();
    if ( arch > 300 )
        kepler = 1;

#if defined(PRECISION_d)
    precision = 1;
#endif

#if defined(TEXTURE)
    texture = 1;
#endif

    // Texture path: taken only for double precision on Kepler-class or newer
    // hardware with TEXTURE enabled; in this single-precision file precision
    // stays 0, so the else-branch below is the live path.
    if ( (texture==1) && (precision==1) && (kepler==1) ) {
        // Create channel.
        hipChannelFormatDesc channel_desc;
        channel_desc = hipCreateChannelDesc(32, 32, 32, 32,
                                            hipChannelFormatKindSigned);

        // Create resource descriptor.
        struct hipResourceDesc resDescdx;
        memset(&resDescdx, 0, sizeof(resDescdx));
        resDescdx.resType = hipResourceTypeLinear;
        resDescdx.res.linear.devPtr = (void*)dx;
        resDescdx.res.linear.desc = channel_desc;
        resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(float);

        // Specify texture object parameters.
        struct hipTextureDesc texDesc;
        memset(&texDesc, 0, sizeof(texDesc));
        texDesc.addressMode[0] = hipAddressModeClamp;
        texDesc.filterMode     = hipFilterModePoint;
        texDesc.readMode       = hipReadModeElementType;

        // Create texture object.
        // NOTE(review): the hip* return codes in this block are not checked,
        // and texdx is never released via hipDestroyTextureObject -- one
        // texture object leaks per call on this path; confirm and fix.
        hipTextureObject_t texdx = 0;
        hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);

        hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);

        if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
            printf("error: number of vectors has to be multiple of 2.\n");
            return MAGMA_ERR_NOT_SUPPORTED;
        }
        if ( num_vecs > 8 ) // avoid running into memory problems
            alignment = 1;

        int num_threads = (num_vecs/2) * blocksize*alignment;
        // every thread handles two vectors
        // NOTE(review): this only warns -- the kernel is still launched with
        // an invalid block configuration afterwards.
        if (  num_threads > 1024 )
            printf("error: too many threads requested.\n");

        // shadowed by the per-alignment block dims chosen below
        dim3 block( blocksize, alignment, num_vecs/2 );

        // decompose the slices into a roughly square 2D grid
        int dimgrid1 = int( sqrt( float( slices )));
        int dimgrid2 = magma_ceildiv( slices, dimgrid1 );

        dim3 grid( dimgrid1, dimgrid2, 1);
        // dynamic shared memory: one float per thread and per vector pair bank
        int Ms = num_vecs * blocksize*alignment * sizeof( float );

        if ( alignment == 1) {
            dim3 block( blocksize, num_vecs/2, 1 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex<true>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex<false>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
        }
        else if ( alignment == 4) {
            dim3 block( blocksize, alignment, num_vecs/2 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
        }
        else if ( alignment == 8) {
            dim3 block( blocksize, alignment, num_vecs/2 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
        }
        else if ( alignment == 16) {
            dim3 block( blocksize, alignment, num_vecs/2 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
        }
        else if ( alignment == 32) {
            dim3 block( blocksize, alignment, num_vecs/2 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, texdx, beta, dy );
        }
        else {
            printf("error: alignment %d not supported.\n", int(alignment) );
            return MAGMA_ERR_NOT_SUPPORTED;
        }
    } else {
        // Plain global-memory path (the live path for single precision).
        if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
            printf("error: number of vectors has to be multiple of 2.\n");
            return MAGMA_ERR_NOT_SUPPORTED;
        }
        if ( num_vecs > 8 ) // avoid running into memory problems
            alignment = 1;

        int num_threads = num_vecs * blocksize*alignment;
        // every thread handles two vectors
        // NOTE(review): warn-only, same issue as above.
        if (  num_threads > 1024 )
            printf("error: too many threads requested.\n");

        // decompose the slices into a roughly square 2D grid
        int dimgrid1 = int( sqrt( float( slices )));
        int dimgrid2 = magma_ceildiv( slices, dimgrid1 );

        dim3 grid( dimgrid1, dimgrid2, 1);
        // dynamic shared memory: one float per thread
        int Ms = num_threads * sizeof( float );

        if ( alignment == 1) {
            dim3 block( blocksize, num_vecs/2, 1 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D<true>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D<false>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
        }
        else if ( alignment == 4) {
            dim3 block( blocksize, alignment, num_vecs/2 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
        }
        else if ( alignment == 8) {
            dim3 block( blocksize, alignment, num_vecs/2 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
        }
        else if ( alignment == 16) {
            dim3 block( blocksize, alignment, num_vecs/2 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
        }
        else if ( alignment == 32) {
            dim3 block( blocksize, alignment, num_vecs/2 );
            if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
                hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
            else
                hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                    m, n, num_vecs, blocksize, alignment, alpha,
                    dval, dcolind, drowptr, dx, beta, dy );
        }
        else {
            printf("error: alignment %d not supported.\n", int(alignment) );
            return MAGMA_ERR_NOT_SUPPORTED;
        }
    }

    return MAGMA_SUCCESS;
}
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmgesellcmmv.cu, normal z -> s, Thu Oct 8 23:05:50 2020
*/
#include "magmasparse_internal.h"
#define PRECISION_s
//#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
// SELL-P SpMV: one thread per row, one y-layer of the block per vector.
// No shared memory or barriers are needed on this path.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D(
    int num_rows,
    int num_cols,
    int num_vecs,
    int blocksize,
    int T,
    float alpha,
    float * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    float * dx,
    float beta,
    float * dy)
{
    const int local_row = threadIdx.x;                       // row within the slice
    const int vec_id    = threadIdx.y;                       // which right-hand side
    const int slice     = blockIdx.y * gridDim.x + blockIdx.x;  // global slice index
    const int row       = slice * blocksize + local_row;     // global row index

    if (row >= num_rows)
        return;

    const int start = drowptr[ slice ];
    // each thread walks this many padded elements of its row
    const int elems_per_thread = (drowptr[ slice+1 ] - start) / blocksize;

    float acc = MAGMA_S_MAKE(0.0, 0.0);
    for (int k = 0; k < elems_per_thread; ++k) {
        const int   e   = start + local_row + blocksize * k;
        const float a   = dval[ e ];
        const int   col = dcolind[ e ];
        // x is stored row-major across the vectors
        acc += a * dx[ col * num_vecs + vec_id ];
    }

    // y is stored column-major (one contiguous column per vector)
    const int out = row + vec_id * num_rows;
    if (betazero)
        dy[ out ] = acc * alpha;
    else
        dy[ out ] = acc * alpha + beta * dy[ out ];
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, T = 4 threads cooperating on each row, one z-layer per vector.
// Expects blockDim = (blocksize, T, num_vecs) and dynamic shared memory of
// blockDim.x*blockDim.y*blockDim.z floats.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D(
    int num_rows,              // number of matrix rows
    int num_cols,              // number of matrix columns (unused here)
    int num_vecs,              // number of right-hand-side vectors
    int blocksize,             // rows per SELL-P slice
    int T,                     // threads assigned to each row (4)
    float alpha,               // scalar multiplier for A*x
    float * dval,              // nonzero values in SELL-P layout
    magma_index_t * dcolind,   // column indices in SELL-P layout
    magma_index_t * drowptr,   // slice pointers
    float * dx,                // input vectors
    float beta,                // scalar multiplier for y
    float * dy)                // output vectors
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    int vec = idz * num_rows;                       // offset of this vector
    extern __shared__ float shared[];

    // Partial dot product; out-of-range rows contribute zero so the whole
    // shared array is initialized before reducing.
    float dot = MAGMA_S_MAKE(0.0, 0.0);
    if (row < num_rows) {
        int offset = drowptr[ bdx ];
        int block  = blocksize * T;  // total number of threads
        int max_   = (drowptr[ bdx+1 ] - offset) / block;  // elements per thread
        for (int k = 0; k < max_; k++) {
            float val = dval[ offset + ldx + block*k ];
            int col   = dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col + vec ];
        }
    }
    shared[ldz] = dot;
    __syncthreads();
    // Reduce the T=4 partials per row. Barriers are executed by all threads
    // of the block; the previous version ran __syncthreads() inside divergent
    // branches, which is undefined behavior.
    if (idx < 2)
        shared[ldz] += shared[ldz + blocksize*2];
    __syncthreads();
    if (idx == 0 && row < num_rows) {
        if (betazero) {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha;
        } else {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha
                          + beta * dy[row+vec];
        }
    }
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, T = 8 threads cooperating on each row, one z-layer per vector.
// Expects blockDim = (blocksize, T, num_vecs) and dynamic shared memory of
// blockDim.x*blockDim.y*blockDim.z floats.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D(
    int num_rows,                    // number of matrix rows
    int num_cols,                    // number of matrix columns (unused here)
    int num_vecs,                    // number of right-hand-side vectors
    int blocksize,                   // rows per SELL-P slice
    int T,                           // threads assigned to each row (8)
    float alpha,                     // scalar multiplier for A*x
    float * dval,                    // nonzero values in SELL-P layout
    magma_index_t * dcolind,         // column indices in SELL-P layout
    magma_index_t * drowptr,         // slice pointers
    const float * __restrict__ dx,   // input vectors
    float beta,                      // scalar multiplier for y
    float * dy)                      // output vectors
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    int vec = idz * num_rows;                       // offset of this vector
    extern __shared__ float shared[];

    // Partial dot product; out-of-range rows contribute zero so the whole
    // shared array is initialized before reducing.
    float dot = MAGMA_S_MAKE(0.0, 0.0);
    if (row < num_rows) {
        int offset = drowptr[ bdx ];
        int block  = blocksize * T;  // total number of threads
        int max_   = (drowptr[ bdx+1 ] - offset) / block;  // elements per thread
        for (int k = 0; k < max_; k++) {
            float val = dval[ offset + ldx + block*k ];
            int col   = dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col + vec ];
        }
    }
    shared[ldz] = dot;
    __syncthreads();
    // Tree reduction of the 8 partials per row; every barrier is executed by
    // all threads of the block (the previous version ran __syncthreads()
    // inside divergent branches, which is undefined behavior).
    if (idx < 4)
        shared[ldz] += shared[ldz + blocksize*4];
    __syncthreads();
    if (idx < 2)
        shared[ldz] += shared[ldz + blocksize*2];
    __syncthreads();
    if (idx == 0 && row < num_rows) {
        if (betazero) {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha;
        } else {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha
                          + beta * dy[row+vec];
        }
    }
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, T = 16 threads cooperating on each row, one z-layer per vector.
// Expects blockDim = (blocksize, T, num_vecs) and dynamic shared memory of
// blockDim.x*blockDim.y*blockDim.z floats.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D(
    int num_rows,              // number of matrix rows
    int num_cols,              // number of matrix columns (unused here)
    int num_vecs,              // number of right-hand-side vectors
    int blocksize,             // rows per SELL-P slice
    int T,                     // threads assigned to each row (16)
    float alpha,               // scalar multiplier for A*x
    float * dval,              // nonzero values in SELL-P layout
    magma_index_t * dcolind,   // column indices in SELL-P layout
    magma_index_t * drowptr,   // slice pointers
    float * dx,                // input vectors
    float beta,                // scalar multiplier for y
    float * dy)                // output vectors
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    int vec = idz * num_rows;                       // offset of this vector
    extern __shared__ float shared[];

    // Partial dot product; out-of-range rows contribute zero so the whole
    // shared array is initialized before reducing.
    float dot = MAGMA_S_MAKE(0.0, 0.0);
    if (row < num_rows) {
        int offset = drowptr[ bdx ];
        int block  = blocksize * T;  // total number of threads
        int max_   = (drowptr[ bdx+1 ] - offset) / block;  // elements per thread
        for (int k = 0; k < max_; k++) {
            float val = dval[ offset + ldx + block*k ];
            int col   = dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col + vec ];
        }
    }
    shared[ldz] = dot;
    __syncthreads();
    // Tree reduction of the 16 partials per row; every barrier is executed by
    // all threads of the block (the previous version ran __syncthreads()
    // inside divergent branches, which is undefined behavior).
    if (idx < 8)
        shared[ldz] += shared[ldz + blocksize*8];
    __syncthreads();
    if (idx < 4)
        shared[ldz] += shared[ldz + blocksize*4];
    __syncthreads();
    if (idx < 2)
        shared[ldz] += shared[ldz + blocksize*2];
    __syncthreads();
    if (idx == 0 && row < num_rows) {
        if (betazero) {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha;
        } else {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha
                          + beta * dy[row+vec];
        }
    }
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, T = 32 threads cooperating on each row, one z-layer per vector.
// Expects blockDim = (blocksize, T, num_vecs) and dynamic shared memory of
// blockDim.x*blockDim.y*blockDim.z floats.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D(
    int num_rows,              // number of matrix rows
    int num_cols,              // number of matrix columns (unused here)
    int num_vecs,              // number of right-hand-side vectors
    int blocksize,             // rows per SELL-P slice
    int T,                     // threads assigned to each row (32)
    float alpha,               // scalar multiplier for A*x
    float * dval,              // nonzero values in SELL-P layout
    magma_index_t * dcolind,   // column indices in SELL-P layout
    magma_index_t * drowptr,   // slice pointers
    float * dx,                // input vectors
    float beta,                // scalar multiplier for y
    float * dy)                // output vectors
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int idz = threadIdx.z;  // vector
    int ldx = idx * blocksize + idy;
    int ldz = idz * blocksize * T + idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    int vec = idz * num_rows;                       // offset of this vector
    extern __shared__ float shared[];

    // Partial dot product; out-of-range rows contribute zero so the whole
    // shared array is initialized before reducing.
    float dot = MAGMA_S_MAKE(0.0, 0.0);
    if (row < num_rows) {
        int offset = drowptr[ bdx ];
        int block  = blocksize * T;  // total number of threads
        int max_   = (drowptr[ bdx+1 ] - offset) / block;  // elements per thread
        for (int k = 0; k < max_; k++) {
            float val = dval[ offset + ldx + block*k ];
            int col   = dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col + vec ];
        }
    }
    shared[ldz] = dot;
    __syncthreads();
    // Tree reduction of the 32 partials per row; every barrier is executed by
    // all threads of the block (the previous version ran __syncthreads()
    // inside divergent branches, which is undefined behavior).
    if (idx < 16)
        shared[ldz] += shared[ldz + blocksize*16];
    __syncthreads();
    if (idx < 8)
        shared[ldz] += shared[ldz + blocksize*8];
    __syncthreads();
    if (idx < 4)
        shared[ldz] += shared[ldz + blocksize*4];
    __syncthreads();
    if (idx < 2)
        shared[ldz] += shared[ldz + blocksize*2];
    __syncthreads();
    if (idx == 0 && row < num_rows) {
        if (betazero) {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha;
        } else {
            dy[row+vec] = (shared[ldz] + shared[ldz + blocksize*1]) * alpha
                          + beta * dy[row+vec];
        }
    }
}
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, alignment 1 (one thread per row), several vectors handled via
// threadIdx.y; x is read through a texture object holding int4-packed data,
// so one fetch serves two vectors at once.
// NOTE(review): the guard tests PRECISION_d although this file is the
// single-precision (float / MAGMA_S) variant, so the body is compiled out
// for float builds -- presumably an artifact of the precision generator;
// __hiloint2float should also be verified for this precision.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ]; // start of this slice
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + idx + blocksize*k ];
int col =
num_vecs * dcolind[ offset + idx + blocksize*k ];
// one int4 fetch carries two packed vector entries
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
// NOTE(review): the store index (row + num_rows*idy*2) and the beta read
// index (row*num_vecs + idy*2) address different layouts of dy -- verify
// which multi-vector layout is intended before relying on the beta path.
if (betazero) {
dy[row+num_rows*idy*2] =
dot1*alpha;
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha;
} else {
dy[row+num_rows*idy*2] =
dot1*alpha
+ beta*dy [row*num_vecs+idy*2];
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha
+ beta*dy [row*num_vecs+idy*2+1];
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, alignment T = 4, texture variant: 4 threads per row
// (threadIdx.y), two packed vectors per threadIdx.z layer via int4 fetches.
// NOTE(review): guard checks PRECISION_d in this single-precision file, so
// the body is compiled out for float builds -- precision-generator artifact.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T; // stride between dot1/dot2 shared banks
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
// NOTE(review): the __syncthreads() below is inside the divergent
// 'if ( idx < 2 )' branch -- threads with idx >= 2 never reach it, which
// is formally undefined behavior; confirm safety on target architectures.
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, alignment T = 8, texture variant: 8 threads per row
// (threadIdx.y), two packed vectors per threadIdx.z layer via int4 fetches.
// NOTE(review): guard checks PRECISION_d in this single-precision file, so
// the body is compiled out for float builds -- precision-generator artifact.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T; // stride between dot1/dot2 shared banks
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
// Tree reduction of 8 partial sums per row for both packed vectors.
// NOTE(review): the __syncthreads() calls below are inside the divergent
// 'if ( idx < 4 )' branch (threads with idx >= 4 never reach them), and the
// idx<2 step has no barrier before it -- this relies on implicit warp
// synchrony and is formally undefined behavior; confirm target archs.
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, alignment T = 16, texture variant: 16 threads per row
// (threadIdx.y), two packed vectors per threadIdx.z layer via int4 fetches.
// NOTE(review): guard checks PRECISION_d in this single-precision file, so
// the body is compiled out for float builds -- precision-generator artifact.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T; // stride between dot1/dot2 shared banks
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
// Tree reduction of 16 partial sums per row for both packed vectors.
// NOTE(review): the __syncthreads() calls below sit inside the divergent
// 'if ( idx < 8 )' branch, and the idx<4 / idx<2 steps have no barrier
// between them -- the code relies on implicit warp-synchronous execution,
// which is formally undefined behavior; confirm the targeted architectures.
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, alignment T = 32, texture variant: 32 threads per row
// (threadIdx.y), two packed vectors per threadIdx.z layer via int4 fetches.
// NOTE(review): guard checks PRECISION_d in this single-precision file, so
// the body is compiled out for float builds -- precision-generator artifact.
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T; // stride between dot1/dot2 shared banks
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
// Tree reduction of 32 partial sums per row for both packed vectors.
// NOTE(review): the __syncthreads() calls below sit inside the divergent
// 'if ( idx < 16 )' branch, and the idx<8 / idx<4 / idx<2 steps have no
// barriers between them -- the code relies on implicit warp-synchronous
// execution, which is formally undefined behavior; confirm target archs.
if ( idx < 16 ) {
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
Input format is SELLP. Note, that the input format for X is row-major
while the output format for Y is column major!
Arguments
---------
@param[in]
transA magma_trans_t
transpose A?
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of columns in X and Y
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
// Host-side dispatcher for the SELL-P multi-vector SpMV kernels above.
// Chooses the texture path (packed int4 reads of x) when TEXTURE and
// PRECISION_d are enabled on a Kepler+ device, otherwise the plain path,
// then launches the kernel matching 'alignment' (threads per row).
extern "C" magma_int_t
magma_smgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
// using a 3D thread grid for small num_vecs, a 2D grid otherwise
int texture=0, kepler=0, precision=0;
magma_int_t arch = magma_getdevice_arch();
if ( arch > 300 )
kepler = 1;
#if defined(PRECISION_d)
precision = 1;
#endif
#if defined(TEXTURE)
texture = 1;
#endif
if ( (texture==1) && (precision==1) && (kepler==1) ) {
// Create channel.
// 4 x 32-bit signed channels: each texel is an int4 covering two values.
cudaChannelFormatDesc channel_desc;
channel_desc = cudaCreateChannelDesc(32, 32, 32, 32,
cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(float);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
// NOTE(review): the texture object is never destroyed before returning --
// confirm whether cudaDestroyTextureObject is handled elsewhere.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = (num_vecs/2) * blocksize*alignment;
// every thread handles two vectors
// NOTE(review): on too many threads this only prints and then launches
// anyway -- consider returning an error here instead.
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
dim3 block( blocksize, alignment, num_vecs/2 );
int dimgrid1 = int( sqrt( float( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
// dynamic shared memory: one float per thread and vector (both halves)
int Ms = num_vecs * blocksize*alignment * sizeof( float );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_1_3D_tex<true><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_1_3D_tex<false><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_4_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_4_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_8_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_8_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_16_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_16_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_32_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_32_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
} else {
// Plain (non-texture) path: same dispatch over alignment.
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = num_vecs * blocksize*alignment;
// every thread handles two vectors
// NOTE(review): only prints on overflow and still launches -- see above.
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int dimgrid1 = int( sqrt( float( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( float );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_1_3D<true><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_1_3D<false><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_4_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_4_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_8_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_8_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_16_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_16_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_32_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_32_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
}
return MAGMA_SUCCESS;
}
|
6436f7a4383a42df843ef44c7c7ede65b59ac8f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Defines the basic matrix operations for the AIJ (compressed row)
matrix storage format using the CUSPARSE library,
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#include <thrust/async/for_each.h>
const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0};
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* The following are copied from hipsparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in
0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them.
typedef enum {
HIPSPARSE_MV_ALG_DEFAULT = 0,
HIPSPARSE_COOMV_ALG = 1,
HIPSPARSE_CSRMV_ALG1 = 2,
HIPSPARSE_CSRMV_ALG2 = 3
} hipsparseSpMVAlg_t;
typedef enum {
HIPSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0,
HIPSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG1) = 1,
HIPSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG2) = 2,
HIPSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3,
HIPSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_CSRMM_ALG1) = 4,
CUSPARSE_SPMM_ALG_DEFAULT = 0,
HIPSPARSE_SPMM_COO_ALG1 = 1,
HIPSPARSE_SPMM_COO_ALG2 = 2,
CUSPARSE_SPMM_COO_ALG3 = 3,
CUSPARSE_SPMM_COO_ALG4 = 5,
HIPSPARSE_CSRMM_ALG1 = 4,
CUSPARSE_SPMM_CSR_ALG2 = 6,
} hipsparseSpMMAlg_t;
typedef enum {
HIPSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministc
HIPSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministc
} hipsparseCsr2CscAlg_t;
*/
const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "hipsparseSpMVAlg_t","CUSPARSE_",0};
const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","hipsparseSpMMAlg_t","CUSPARSE_SPMM_",0};
const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! We created one*/,"ALG1","ALG2","hipsparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0};
#endif
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat);
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure);
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat,PetscScalar);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool);
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat,PetscBool);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]);
/* Attach a user-provided HIP stream to the matrix' cuSPARSE handle so that
   subsequent cuSPARSE calls for A are issued on that stream.
   Errors with PETSC_ERR_COR if the GPU-side data structure is missing. */
PetscErrorCode MatCUSPARSESetStream(Mat A,const hipStream_t stream)
{
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
cusparsestruct->stream = stream;
stat = hipsparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
/* Adopt an externally created cuSPARSE handle for A. Any previously owned
   handle is destroyed first; the pointer mode is then forced to DEVICE so
   scalar arguments are read from device memory. */
PetscErrorCode MatCUSPARSESetHandle(Mat A,const hipsparseHandle_t handle)
{
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
if (cusparsestruct->handle != handle) {
if (cusparsestruct->handle) {
/* release the old handle before adopting the new one */
stat = hipsparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat);
}
cusparsestruct->handle = handle;
}
stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
/* Forget (but do not destroy) the cuSPARSE handle stored in A. No-op when A
   is not a SEQAIJCUSPARSE matrix or has no GPU-side struct.
   NOTE(review): the handle is zeroed without hipsparseDestroy -- presumably
   it is owned by the caller that installed it via MatCUSPARSESetHandle;
   confirm no leak for handles created internally. */
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscBool flg;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg || !cusparsestruct) PetscFunctionReturn(0);
if (cusparsestruct->handle) cusparsestruct->handle = 0;
PetscFunctionReturn(0);
}
/* Report the solver package backing this factored matrix (always cusparse). */
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSE;
PetscFunctionReturn(0);
}
/*MC
MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported
algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer
performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the
CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
algorithms are not recommended. This class does NOT support direct solver operations.
Level: beginner
.seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
/* Create an (empty) SEQAIJCUSPARSE factor matrix B for A and wire up the
   symbolic-factorization callbacks and preferred orderings appropriate to
   the requested factor type (LU/ILU/ILUDT or Cholesky/ICC). */
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
PetscErrorCode ierr;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
(*B)->factortype = ftype;
ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE;
/* nested-dissection for full LU, natural ordering for incomplete factors */
ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILUDT]);CHKERRQ(ierr);
} else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");
ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
(*B)->canuseordering = PETSC_TRUE;
ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Type-specific implementation behind MatCUSPARSESetFormat(): record the
   requested storage format on the GPU-side struct.
   NOTE(review): MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL currently do the same
   thing -- a single 'format' field serves both operations here. */
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT:
cusparsestruct->format = format;
break;
case MAT_CUSPARSE_ALL:
cusparsestruct->format = format;
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
/*@
MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
operation. Only the MatMult operation can use different GPU storage formats
for MPIAIJCUSPARSE matrices.
Not Collective
Input Parameters:
+ A - Matrix of type SEQAIJCUSPARSE
. op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
- format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)
Output Parameter: none
Level: intermediate
.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
/* Public entry point: dispatch to the type-specific MatCUSPARSESetFormat_C
   method (silently a no-op for matrix types that do not provide it). */
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID,1);
ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* MatSetOption override: handle MAT_FORM_EXPLICIT_TRANSPOSE specially (the
   cached GPU transpose must be invalidated when the option is turned off),
   delegate every other option to the base SeqAIJ implementation. */
PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A,MatOption op,PetscBool flg)
{
PetscErrorCode ierr;
PetscFunctionBegin;
switch (op) {
case MAT_FORM_EXPLICIT_TRANSPOSE:
/* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */
if (A->form_explicit_transpose && !flg) {ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);}
A->form_explicit_transpose = flg;
break;
default:
ierr = MatSetOption_SeqAIJ(A,op,flg);CHKERRQ(ierr);
break;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A);
/* Numeric LU factorization: pull A back to the host, run the CPU SeqAIJ
   factorization, pick the MatSolve variant matching the orderings used,
   then push the triangular factors to the GPU. */
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS isrow = b->row,iscol = b->col;
PetscBool row_identity,col_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (row_identity && col_identity) {
/* natural ordering on both sides: no permutation needed during solves */
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* MatSetFromOptions_SeqAIJCUSPARSE - process the -mat_cusparse_* runtime options.
   The storage-format options apply only to unfactored matrices, hence the
   MAT_FACTOR_NONE guard.  With CUDA >= 11 the cuSPARSE generic-API algorithm
   choices for SpMV, SpMM and csr2csc can also be selected; the SETERRQ checks
   guard against cuSPARSE renumbering its enums, since PetscOptionsEnum() assigns
   values by position in the PETSc name arrays rather than by enum value. */
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
  PetscErrorCode           ierr;
  MatCUSPARSEStorageFormat format;
  PetscBool                flg;
  Mat_SeqAIJCUSPARSE       *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
  if (A->factortype == MAT_FACTOR_NONE) {
    ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);}
    ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    /* this option writes directly into cusparsestruct->spmvAlg; flg only gates the consistency check below */
    ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)",
                            "hipsparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr);
    /* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
    if (flg && CUSPARSE_SPMV_CSR_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
#else
    if (flg && HIPSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
#endif
    ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)",
                            "hipsparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr);
    if (flg && HIPSPARSE_CSRMM_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
    ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices",
                            "hipsparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr);
    if (flg && HIPSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
#endif
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* MatILUFactorSymbolic_SeqAIJCUSPARSE - symbolic ILU factorization.  Any
   previously built GPU triangular factors are destroyed first, the symbolic
   phase runs on the host via MatILUFactorSymbolic_SeqAIJ, and the numeric
   phase is redirected to the CUSPARSE implementation. */
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}
/* MatLUFactorSymbolic_SeqAIJCUSPARSE - symbolic LU factorization.  Resets any
   stale GPU triangular factors, performs the host symbolic phase, and installs
   the CUSPARSE numeric-factorization routine. */
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}
/* MatICCFactorSymbolic_SeqAIJCUSPARSE - symbolic incomplete Cholesky
   factorization.  Resets any stale GPU triangular factors, performs the host
   symbolic phase, and installs the CUSPARSE Cholesky numeric routine. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}
/* MatCholeskyFactorSymbolic_SeqAIJCUSPARSE - symbolic (full) Cholesky
   factorization.  Resets any stale GPU triangular factors, performs the host
   symbolic phase, and installs the CUSPARSE Cholesky numeric routine. */
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}
/* MatSeqAIJCUSPARSEBuildILULowerTriMatrix - build (first call) or refresh
   (subsequent calls) the GPU copy of the unit lower triangular ILU factor of A.

   The host factor's strictly-lower entries come from a->i/a->j/a->a; a unit
   diagonal entry is inserted explicitly for every row so the CSR matches the
   unit-lower-triangular descriptor handed to cuSPARSE.  On the first call the
   CSR structure is assembled in pinned host memory, uploaded, and the
   triangular-solve analysis is performed; later calls only re-upload values. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a = (Mat_SeqAIJ*)A->data;
  PetscInt                          n = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  hipsparseStatus_t                 stat;
  const PetscInt                    *ai = a->i,*aj = a->j,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiLo, *AjLo;
  PetscInt                          i,nz, nzLower, offset, rowOffset;
  PetscErrorCode                    ierr;
  hipError_t                        cerr;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  /* only rebuild when the CPU copy is the authoritative (or only) one */
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
      nzLower=n+ai[n]-ai[1];
      if (!loTriFactor) {
        PetscScalar *AALo;
        /* values are staged in pinned host memory for the upload */
        cerr = hipHostMalloc((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
        /* Allocate Space for the lower triangular matrix */
        cerr = hipHostMalloc((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = hipHostMalloc((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr);
        /* Fill the lower triangular matrix: row 0 holds only the unit diagonal */
        AiLo[0] = (PetscInt) 0;
        AiLo[n] = nzLower;
        AjLo[0] = (PetscInt) 0;
        AALo[0] = (MatScalar) 1.0;
        v    = aa;
        vi   = aj;
        offset = 1;
        rowOffset= 1;
        for (i=1; i<n; i++) {
          nz = ai[i+1] - ai[i];
          /* additional 1 for the term on the diagonal */
          AiLo[i]    = rowOffset;
          rowOffset += nz+1;
          /* copy the strictly-lower entries of row i, then append its unit diagonal */
          ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr);
          ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr);
          offset      += nz;
          AjLo[offset] = (PetscInt) i;
          AALo[offset] = (MatScalar) 1.0;
          offset      += 1;
          v  += nz;
          vi += nz;
        }
        /* allocate space for the triangular factor information */
        ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
        loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
        /* Create the matrix description */
        stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
        /* set the operation */
        loTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
        /* set the matrix */
        loTriFactor->csrMat = new CsrMatrix;
        loTriFactor->csrMat->num_rows       = n;
        loTriFactor->csrMat->num_cols       = n;
        loTriFactor->csrMat->num_entries    = nzLower;
        loTriFactor->csrMat->row_offsets    = new THRUSTINTARRAY32(n+1);
        loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);
        loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
        loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);
        loTriFactor->csrMat->values         = new THRUSTARRAY(nzLower);
        loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);
        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* the csrsv2-style API needs an externally allocated work buffer for the analysis/solve */
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
                                       loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                       loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                       loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
                                       &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                                 loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                 loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                 loTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 loTriFactor->solveInfo,
                                 loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                                 loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
        /* keep the pinned value buffer for fast refreshes; the index buffers are no longer needed */
        loTriFactor->AA_h = AALo;
        cerr = hipHostFree(AiLo);CHKERRCUDA(cerr);
        cerr = hipHostFree(AjLo);CHKERRCUDA(cerr);
        ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
      } else { /* update values only */
        if (!loTriFactor->AA_h) {
          cerr = hipHostMalloc((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
        }
        /* Fill the lower triangular matrix: same layout as the first-build path above */
        loTriFactor->AA_h[0] = 1.0;
        v  = aa;
        vi = aj;
        offset = 1;
        for (i=1; i<n; i++) {
          nz = ai[i+1] - ai[i];
          ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr);
          offset += nz;
          loTriFactor->AA_h[offset] = 1.0;
          offset += 1;
          v += nz;
        }
        loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower);
        ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
      }
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}
/* MatSeqAIJCUSPARSEBuildILUUpperTriMatrix - build (first call) or refresh
   (subsequent calls) the GPU copy of the upper triangular ILU factor of A.

   Rows are traversed last-to-first through a->diag (adiag decreases with the
   row index in the factored storage).  The stored diagonal is uploaded as
   1./v[nz] because the GPU solve uses a non-unit diagonal descriptor --
   NOTE(review): this presumes the host SeqAIJ factor keeps reciprocal
   diagonals; confirm against MatLUFactorNumeric_SeqAIJ.  On first call the
   CSR structure and solve analysis are created; later calls only re-upload
   the values. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a = (Mat_SeqAIJ*)A->data;
  PetscInt                          n = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  hipsparseStatus_t                 stat;
  const PetscInt                    *aj = a->j,*adiag = a->diag,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiUp, *AjUp;
  PetscInt                          i,nz, nzUpper, offset;
  PetscErrorCode                    ierr;
  hipError_t                        cerr;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  /* only rebuild when the CPU copy is the authoritative (or only) one */
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* next, figure out the number of nonzeros in the upper triangular matrix. */
      nzUpper = adiag[0]-adiag[n];
      if (!upTriFactor) {
        PetscScalar *AAUp;
        /* values are staged in pinned host memory for the upload */
        cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
        /* Allocate Space for the upper triangular matrix */
        cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
        /* Fill the upper triangular matrix */
        AiUp[0]=(PetscInt) 0;
        AiUp[n]=nzUpper;
        offset = nzUpper;
        for (i=n-1; i>=0; i--) {
          v  = aa + adiag[i+1] + 1;
          vi = aj + adiag[i+1] + 1;
          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i+1]-1;
          /* decrement the offset */
          offset -= (nz+1);
          /* first, set the diagonal elements */
          AjUp[offset] = (PetscInt) i;
          AAUp[offset] = (MatScalar)1./v[nz];
          AiUp[i]      = AiUp[i+1] - (nz+1);
          /* then the strictly-upper entries of row i */
          ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr);
          ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr);
        }
        /* allocate space for the triangular factor information */
        ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
        upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
        /* Create the matrix description */
        stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
        /* set the operation */
        upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
        /* set the matrix */
        upTriFactor->csrMat = new CsrMatrix;
        upTriFactor->csrMat->num_rows       = n;
        upTriFactor->csrMat->num_cols       = n;
        upTriFactor->csrMat->num_entries    = nzUpper;
        upTriFactor->csrMat->row_offsets    = new THRUSTINTARRAY32(n+1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);
        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);
        upTriFactor->csrMat->values         = new THRUSTARRAY(nzUpper);
        upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);
        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* the csrsv2-style API needs an externally allocated work buffer for the analysis/solve */
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
                                       upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                       upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                       upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
                                       &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                                 upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                 upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                 upTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 upTriFactor->solveInfo,
                                 upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                                 upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
        /* keep the pinned value buffer for fast refreshes; the index buffers are no longer needed */
        upTriFactor->AA_h = AAUp;
        cerr = hipHostFree(AiUp);CHKERRCUDA(cerr);
        cerr = hipHostFree(AjUp);CHKERRCUDA(cerr);
        ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
      } else {
        if (!upTriFactor->AA_h) {
          cerr = hipHostMalloc((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
        }
        /* Fill the upper triangular matrix: same layout as the first-build path above */
        offset = nzUpper;
        for (i=n-1; i>=0; i--) {
          v = aa + adiag[i+1] + 1;
          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i+1]-1;
          /* decrement the offset */
          offset -= (nz+1);
          /* first, set the diagonal elements */
          upTriFactor->AA_h[offset] = 1./v[nz];
          ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr);
        }
        upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper);
        ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
      }
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}
/* MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU - upload both ILU triangular factors
   of A to the GPU, allocate the intermediate work vector used between the two
   triangular solves, and upload the row/column permutations whenever the
   ordering is not the identity (the identity orderings use the faster
   NaturalOrdering solve paths and need no permutation arrays). */
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           isrow = a->row,iscol = a->icol;
  PetscBool                    row_identity,col_identity;
  PetscInt                     n = A->rmap->n;

  PetscFunctionBegin;
  if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
  ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);
  if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
  cusparseTriFactors->nnz=a->nz;
  /* both the host factor and its GPU copies are now valid */
  A->offloadmask = PETSC_OFFLOAD_BOTH;
  /* lower triangular indices */
  ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
  if (!row_identity && !cusparseTriFactors->rpermIndices) {
    const PetscInt *r;
    ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(r, r+n);
    ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
  }
  /* upper triangular indices */
  ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
  if (!col_identity && !cusparseTriFactors->cpermIndices) {
    const PetscInt *c;
    ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(c, c+n);
    ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* MatSeqAIJCUSPARSEBuildICCTriMatrices - build (first call) or refresh the two
   GPU triangular factors used by the ICC solve.

   Only the upper triangular pattern (AiUp/AjUp) is stored on the GPU: the
   upper solve uses it directly with a unit diagonal (values AAUp), while the
   lower solve reuses the very same pattern with a transpose operation and a
   non-unit diagonal (values AALo).

   NOTE(review): A->data is viewed both as Mat_SeqAIJ (for a->nz) and as
   Mat_SeqSBAIJ (for i/j/a); this relies on the ICC factor being stored with an
   SBAIJ-compatible layout -- confirm against the host ICC factorization.
   Likewise, the 1.0/v[nz] diagonal and the AAUp negation / AALo scaling below
   presume the host factor stores reciprocal diagonals; confirm there too. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
  Mat_SeqAIJ                        *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  hipsparseStatus_t                 stat;
  PetscErrorCode                    ierr;
  hipError_t                        cerr;
  PetscInt                          *AiUp, *AjUp;
  PetscScalar                       *AAUp;
  PetscScalar                       *AALo;
  PetscInt                          nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j;
  Mat_SeqSBAIJ                      *b = (Mat_SeqSBAIJ*)A->data;
  const PetscInt                    *ai = b->i,*aj = b->j,*vj;
  const MatScalar                   *aa = b->a,*v;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  /* only rebuild when the CPU copy is the authoritative (or only) one */
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* value buffers for both factors are staged in pinned host memory */
      cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
      cerr = hipHostMalloc((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
      if (!upTriFactor && !loTriFactor) {
        /* Allocate Space for the upper triangular matrix */
        cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
        /* Fill the upper triangular matrix */
        AiUp[0]=(PetscInt) 0;
        AiUp[n]=nzUpper;
        offset = 0;
        for (i=0; i<n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          vj = aj + ai[i];
          nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
          /* first, set the diagonal elements */
          AjUp[offset] = (PetscInt) i;
          AAUp[offset] = (MatScalar)1.0/v[nz];
          AiUp[i]      = offset;
          AALo[offset] = (MatScalar)1.0/v[nz];
          offset+=1;
          if (nz>0) {
            ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr);
            ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
            /* negate the off-diagonals; the lower-factor copy is additionally
               scaled by v[nz] (the stored diagonal) */
            for (j=offset; j<offset+nz; j++) {
              AAUp[j] = -AAUp[j];
              AALo[j] = AAUp[j]/v[nz];
            }
            offset+=nz;
          }
        }
        /* allocate space for the triangular factor information */
        ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
        upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
        /* Create the matrix description */
        stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        /* unit diagonal: the explicitly stored diagonal entries are not used by this solve */
        stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
        /* set the matrix */
        upTriFactor->csrMat = new CsrMatrix;
        upTriFactor->csrMat->num_rows       = A->rmap->n;
        upTriFactor->csrMat->num_cols       = A->cmap->n;
        upTriFactor->csrMat->num_entries    = a->nz;
        upTriFactor->csrMat->row_offsets    = new THRUSTINTARRAY32(A->rmap->n+1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
        upTriFactor->csrMat->values         = new THRUSTARRAY(a->nz);
        upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
        /* set the operation */
        upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* the csrsv2-style API needs an externally allocated work buffer for the analysis/solve */
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
                                       upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                       upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                       upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
                                       &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                                 upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                 upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                 upTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 upTriFactor->solveInfo,
                                 upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                                 upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
        /* allocate space for the triangular factor information */
        ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
        loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
        /* Create the matrix description */
        stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        /* stored upper triangular: the TRANSPOSE operation below makes the
           effective solve lower triangular without storing a second pattern */
        stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
        /* set the operation */
        loTriFactor->solveOp = HIPSPARSE_OPERATION_TRANSPOSE;
        /* set the matrix */
        loTriFactor->csrMat = new CsrMatrix;
        loTriFactor->csrMat->num_rows       = A->rmap->n;
        loTriFactor->csrMat->num_cols       = A->cmap->n;
        loTriFactor->csrMat->num_entries    = a->nz;
        loTriFactor->csrMat->row_offsets    = new THRUSTINTARRAY32(A->rmap->n+1);
        loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
        loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
        loTriFactor->csrMat->values         = new THRUSTARRAY(a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
                                       loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                       loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                       loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
                                       &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                                 loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                 loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                 loTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 loTriFactor->solveInfo,
                                 loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                                 loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
        ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr);
        cerr = hipHostFree(AiUp);CHKERRCUDA(cerr);
        cerr = hipHostFree(AjUp);CHKERRCUDA(cerr);
      } else {
        /* Fill the upper triangular matrix: values only, same layout as the first-build path */
        offset = 0;
        for (i=0; i<n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
          /* first, set the diagonal elements */
          AAUp[offset] = 1.0/v[nz];
          AALo[offset] = 1.0/v[nz];
          offset+=1;
          if (nz>0) {
            ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
            for (j=offset; j<offset+nz; j++) {
              AAUp[j] = -AAUp[j];
              AALo[j] = AAUp[j]/v[nz];
            }
            offset+=nz;
          }
        }
        if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
        if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
        upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
        ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
      }
      cerr = hipHostFree(AAUp);CHKERRCUDA(cerr);
      cerr = hipHostFree(AALo);CHKERRCUDA(cerr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}
/* MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU - upload the ICC triangular factors
   of A to the GPU, allocate the triangular-solve work vector, and, for
   non-identity orderings, upload the permutation (row side) and its inverse
   (column side).  The nnz count accounts for both triangles of the symmetric
   factor: every off-diagonal entry twice plus the diagonal once. */
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           ip = a->row;
  PetscBool                    perm_identity;
  PetscInt                     n = A->rmap->n;

  PetscFunctionBegin;
  if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
  ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
  if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
  cusparseTriFactors->nnz=(a->nz-n)*2 + n;
  /* both the host factor and its GPU copies are now valid */
  A->offloadmask = PETSC_OFFLOAD_BOTH;
  /* lower triangular indices */
  ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
  if (!perm_identity) {
    IS             iip;
    const PetscInt *irip,*rip;
    /* rows use the permutation itself, columns its inverse */
    ierr = ISInvertPermutation(ip,PETSC_DECIDE,&iip);CHKERRQ(ierr);
    ierr = ISGetIndices(iip,&irip);CHKERRQ(ierr);
    ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(rip, rip+n);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(irip, irip+n);
    ierr = ISRestoreIndices(iip,&irip);CHKERRQ(ierr);
    ierr = ISDestroy(&iip);CHKERRQ(ierr);
    ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* MatCholeskyFactorNumeric_SeqAIJCUSPARSE - numeric Cholesky/ICC factorization.
   The factorization runs on the CPU via MatCholeskyFactorNumeric_SeqAIJ; then
   the GPU solve kernels matching the ordering are installed and the factors
   are copied to the GPU. */
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat_SeqAIJ     *b = (Mat_SeqAIJ*)B->data;
  IS             ip = b->row;
  PetscBool      perm_identity;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
  B->offloadmask = PETSC_OFFLOAD_CPU;
  /* an identity permutation admits the faster NaturalOrdering solve kernels */
  ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
  if (perm_identity) {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
  } else {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
  }
  B->ops->matsolve          = NULL;
  B->ops->matsolvetranspose = NULL;
  /* build/upload the triangular factors on the GPU */
  ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
hipsparseStatus_t stat;
hipsparseIndexBase_t indexBase;
hipsparseMatrixType_t matrixType;
hipsparseFillMode_t fillMode;
hipsparseDiagType_t diagType;
hipError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
/* allocate space for the transpose of the lower triangular factor */
ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr);
loTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the lower triangular factor */
matrixType = cusparseGetMatType(loTriFactor->descr);
indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
fillMode = cusparseGetMatFillMode(loTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ?
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(loTriFactor->descr);
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the lower triangular factor*/
loTriFactorT->csrMat = new CsrMatrix;
loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols;
loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows;
loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries;
loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1);
loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);
/* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
HIPSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC, indexBase,
HIPSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo,
&loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->solveInfo,
loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;
/*********************************************/
/* Now the Transpose of the Upper Tri Factor */
/*********************************************/
/* allocate space for the transpose of the upper triangular factor */
ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr);
upTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the upper triangular factor */
matrixType = cusparseGetMatType(upTriFactor->descr);
indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
fillMode = cusparseGetMatFillMode(upTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ?
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(upTriFactor->descr);
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the upper triangular factor*/
upTriFactorT->csrMat = new CsrMatrix;
upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols;
upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows;
upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries;
upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1);
upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);
/* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle,upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
HIPSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC, indexBase,
HIPSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo,
&upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->solveInfo,
upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
PetscFunctionReturn(0);
}
/* Host/device functor: truncate the real part of a PetscScalar to a PetscInt.
   Used below with thrust::transform() to turn a scalar-valued device array
   (holding entry indices produced by csr2csc) into an integer gather map. */
struct PetscScalarToPetscInt
{
  __host__ __device__
  PetscInt operator()(PetscScalar s)
  {
    return static_cast<PetscInt>(PetscRealPart(s));
  }
};
/* Build (or refresh) an explicit transpose of A on the GPU, stored in
   cusparsestruct->matTranspose, so MatMultTranspose can run as a regular
   (non-transposed) SpMV. For the CSR format the transpose is produced with
   cusparse's csr2csc, and a gather map (csr2csc_i) is cached so that later
   value-only refreshes need just a thrust permutation copy rather than a
   full csr2csc. No-op when the explicit transpose is disabled, the matrix
   is empty, or the cached transpose is already up to date. */
static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTransposeForMult(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
hipsparseStatus_t stat;
hipsparseIndexBase_t indexBase;
hipError_t err;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!A->form_explicit_transpose || !A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
/* make sure the host values have been uploaded before transposing */
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
if (!matstruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing mat struct");
matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
if (A->transupdated && !matstructT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing matTranspose struct");
if (A->transupdated) PetscFunctionReturn(0); /* cached transpose is current */
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (cusparsestruct->format != MAT_CUSPARSE_CSR) {
/* non-CSR formats rebuild the transpose from scratch each time */
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
}
if (!cusparsestruct->matTranspose) { /* create cusparse matrix */
matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
stat = hipsparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat);
indexBase = cusparseGetMatIndexBase(matstruct->descr);
stat = hipsparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(matstructT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
/* set alpha and beta: device-resident scalars used by SpMV calls */
err = hipMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
/* the transpose has swapped row/column dimensions but the same nnz */
CsrMatrix *matrixT = new CsrMatrix;
matstructT->mat = matrixT;
matrixT->num_rows = A->cmap->n;
matrixT->num_cols = A->rmap->n;
matrixT->num_entries = a->nz;
matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows+1);
matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
matrixT->values = new THRUSTARRAY(a->nz);
if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n+1); }
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&matstructT->matDescr,
matrixT->num_rows, matrixT->num_cols, matrixT->num_entries,
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(),
matrixT->values->data().get(),
HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */
indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
} else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
/* legacy (pre CUDA-11) path: HYB -> CSR -> CSC -> HYB */
CsrMatrix *temp = new CsrMatrix;
CsrMatrix *tempT = new CsrMatrix;
/* First convert HYB to CSR */
temp->num_rows = A->rmap->n;
temp->num_cols = A->cmap->n;
temp->num_entries = a->nz;
temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
temp->column_indices = new THRUSTINTARRAY32(a->nz);
temp->values = new THRUSTARRAY(a->nz);
stat = cusparse_hyb2csr(cusparsestruct->handle,
matstruct->descr, (cusparseHybMat_t)matstruct->mat,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get());CHKERRCUSPARSE(stat);
/* Next, convert CSR to CSC (i.e. the matrix transpose) */
/* NOTE(review): tempT's num_rows/num_cols (and its row_offsets length) are NOT
   swapped relative to A, and the csr2hyb call below also passes A->rmap->n x
   A->cmap->n for the transpose; this looks correct only for square matrices —
   confirm against the upstream implementation. */
tempT->num_rows = A->rmap->n;
tempT->num_cols = A->cmap->n;
tempT->num_entries = a->nz;
tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
tempT->column_indices = new THRUSTINTARRAY32(a->nz);
tempT->values = new THRUSTARRAY(a->nz);
stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows,
temp->num_cols, temp->num_entries,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get(),
tempT->values->data().get(),
tempT->column_indices->data().get(),
tempT->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
/* Last, convert CSC to HYB */
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n,
matstructT->descr, tempT->values->data().get(),
tempT->row_offsets->data().get(),
tempT->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstructT->mat = hybMat;
A->transupdated = PETSC_TRUE;
/* delete temporaries */
if (tempT) {
if (tempT->values) delete (THRUSTARRAY*) tempT->values;
if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices;
if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets;
delete (CsrMatrix*) tempT;
}
if (temp) {
if (temp->values) delete (THRUSTARRAY*) temp->values;
if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices;
if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets;
delete (CsrMatrix*) temp;
}
#endif
}
}
if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */
CsrMatrix *matrix = (CsrMatrix*)matstruct->mat;
CsrMatrix *matrixT = (CsrMatrix*)matstructT->mat;
if (!matrix) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix");
if (!matrix->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix rows");
if (!matrix->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix cols");
if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix values");
if (!matrixT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT");
if (!matrixT->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT rows");
if (!matrixT->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT cols");
if (!matrixT->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT values");
if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */
cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
if (!cusparsestruct->csr2csc_i) {
/* Build the gather map: run csr2csc once on the sequence 0,1,2,... (stored
   as scalars) so that each transposed entry receives the index of its
   source entry; PetscScalarToPetscInt then converts that to csr2csc_i. */
THRUSTARRAY csr2csc_a(matrix->num_entries);
PetscStackCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0));
indexBase = cusparseGetMatIndexBase(matstruct->descr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
void *csr2cscBuffer;
size_t csr2cscBufferSize;
stat = hipsparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n,
A->cmap->n, matrix->num_entries,
matrix->values->data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, &csr2cscBufferSize);CHKERRCUSPARSE(stat);
err = hipMalloc(&csr2cscBuffer,csr2cscBufferSize);CHKERRCUDA(err);
#endif
if (matrix->num_entries) {
/* When there are no nonzeros, this routine mistakenly returns HIPSPARSE_STATUS_INVALID_VALUE in
mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK.
I checked every parameters and they were just fine. I have no clue why cusparse complains.
Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[]
should be filled with indexBase. So I just take a shortcut here.
*/
stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n,
A->cmap->n,matrix->num_entries,
csr2csc_a.data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
} else {
matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase);
}
cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries);
PetscStackCallThrust(thrust::transform(thrust::device,matrixT->values->begin(),matrixT->values->end(),cusparsestruct->csr2csc_i->begin(),PetscScalarToPetscInt()));
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
err = hipFree(csr2cscBuffer);CHKERRCUDA(err);
#endif
}
/* refresh the transposed values with a gather through the cached map */
PetscStackCallThrust(thrust::copy(thrust::device,thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()),
thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()),
matrixT->values->begin()));
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* the compressed row indices is not used for matTranspose */
matstructT->cprowIndices = NULL;
/* assign the pointer */
((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT;
A->transupdated = PETSC_TRUE;
PetscFunctionReturn(0);
}
/* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = HIPSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */
/* Solve A^T x = b on the GPU using the transposed triangular factors and the
   row/column permutations recorded at factorization time. Since A = P_r^T L U P_c,
   the transposed solve applies U^T first and then L^T ((LU)^T = U^T L^T); the
   transposed factor structures are built lazily on first use. Logs
   2*nnz - n flops for the two triangular solves. */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
PetscInt n = xx->map->n;
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation: x <- b(rperm) */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()),
xGPU);
/* First, solve U: tempGPU <- (U^T)^{-1} x */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
xarray,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
tempGPU->data().get(),
upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
tempGPU->data().get());CHKERRCUSPARSE(stat);
#endif
/* Then, solve L: x <- (L^T)^{-1} tempGPU */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
xarray,
loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
xarray);CHKERRCUSPARSE(stat);
#endif
/* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()),
tempGPU->begin());
/* Copy the temporary to the full solution. */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),tempGPU->begin(), tempGPU->end(), xGPU);
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Solve A^T x = b on the GPU for a factorization in natural ordering:
   identical to MatSolveTranspose_SeqAIJCUSPARSE but with no row/column
   permutation steps, so b is fed to the U^T solve directly and the L^T
   solve writes straight into x. Logs 2*nnz - n flops. */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve U: tempGPU <- (U^T)^{-1} b */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
barray,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
tempGPU->data().get(),
upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
tempGPU->data().get());CHKERRCUSPARSE(stat);
#endif
/* Then, solve L: x <- (L^T)^{-1} tempGPU */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
xarray,
loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
xarray);CHKERRCUSPARSE(stat);
#endif
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Solve A x = b on the GPU using the lower/upper triangular factors and the
   row/column permutations from the factorization ordering: permute b, solve
   L then U (forward then backward substitution), and permute the result back
   into x. Logs 2*nnz - n flops for the two triangular solves. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation: tempGPU <- b(rperm).
   NOTE(review): the end iterator uses base bGPU (not bGPU+n) unlike the
   transpose variant above; with thrust::permutation_iterator the range is
   governed by the index iterators, so the two forms appear equivalent —
   confirm against the thrust documentation. */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()),
tempGPU->begin());
/* Next, solve L: x <- L^{-1} tempGPU (forward substitution) */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
tempGPU->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
xarray,
loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
xarray);CHKERRCUSPARSE(stat);
#endif
/* Then, solve U: tempGPU <- U^{-1} x (backward substitution) */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,xarray,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
tempGPU->data().get(),
upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
tempGPU->data().get());CHKERRCUSPARSE(stat);
#endif
/* Last, reorder with the column permutation: x <- tempGPU(cperm) */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()),
xGPU);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Solve A x = b on the GPU for a factorization in natural ordering:
   identical to MatSolve_SeqAIJCUSPARSE but with no row/column permutation
   steps — b feeds the L solve directly and the U solve writes straight
   into x. Logs 2*nnz - n flops. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve L: tempGPU <- L^{-1} b (forward substitution) */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
barray,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
tempGPU->data().get(),
loTriFactor->solvePolicy,loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
tempGPU->data().get());CHKERRCUSPARSE(stat);
#endif
/* Next, solve U: x <- U^{-1} tempGPU (backward substitution) */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,
tempGPU->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
xarray,
upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
xarray);CHKERRCUSPARSE(stat);
#endif
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Download the matrix values from the GPU when the authoritative copy lives
   only there. Only the values array is transferred (the host sparsity
   pattern is unchanged by GPU-side assembly); afterwards the matrix is
   marked valid on both host and device. */
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
  Mat_SeqAIJ         *aij  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  hipError_t         cuerr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  if (A->offloadmask != PETSC_OFFLOAD_GPU) PetscFunctionReturn(0);
  {
    CsrMatrix *csr = (CsrMatrix*)cusp->mat->mat;

    ierr  = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
    cuerr = hipMemcpy(aij->a,csr->values->data().get(),aij->nz*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cuerr);
    cuerr = WaitForCUDA();CHKERRCUDA(cuerr);
    ierr  = PetscLogGpuToCpu(aij->nz*sizeof(PetscScalar));CHKERRQ(ierr);
    ierr  = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
    A->offloadmask = PETSC_OFFLOAD_BOTH;
  }
  PetscFunctionReturn(0);
}
/* Hand back a mutable pointer to the host values array. The device values
   are synchronized to the host first; since the caller may then modify them,
   the offload mask is downgraded to CPU so the next device use re-uploads. */
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
  Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  *array         = aij->a;
  A->offloadmask = PETSC_OFFLOAD_CPU;
  PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt m = A->rmap->n,*ii,*ridx,tmp;
PetscErrorCode ierr;
hipsparseStatus_t stat;
PetscBool both = PETSC_TRUE;
hipError_t err;
PetscFunctionBegin;
if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Cannot copy to GPU");
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */
CsrMatrix *matrix;
matrix = (CsrMatrix*)cusparsestruct->mat->mat;
if (a->nz && !a->a) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR values");
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
matrix->values->assign(a->a, a->a+a->nz);
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
} else {
PetscInt nnz;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
delete cusparsestruct->workVector;
delete cusparsestruct->rowoffsets_gpu;
cusparsestruct->workVector = NULL;
cusparsestruct->rowoffsets_gpu = NULL;
try {
if (a->compressedrow.use) {
m = a->compressedrow.nrows;
ii = a->compressedrow.i;
ridx = a->compressedrow.rindex;
} else {
m = A->rmap->n;
ii = a->i;
ridx = NULL;
}
if (!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR row data");
if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR column data");
if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; }
else nnz = a->nz;
/* create cusparse matrix */
cusparsestruct->nrows = m;
matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
stat = hipsparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(matstruct->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(matstruct->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
err = hipMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* Build a hybrid/ellpack matrix if this option is chosen for the storage */
if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
/* set the matrix */
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
/* assign the pointer */
matstruct->mat = mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mat->num_rows) { /* cusparse errors on empty matrices! */
stat = hipsparseCreateCsr(&matstruct->matDescr,
mat->num_rows, mat->num_cols, mat->num_entries,
mat->row_offsets->data().get(), mat->column_indices->data().get(),
mat->values->data().get(),
HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
#endif
} else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols,
matstruct->descr, mat->values->data().get(),
mat->row_offsets->data().get(),
mat->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstruct->mat = hybMat;
if (mat) {
if (mat->values) delete (THRUSTARRAY*)mat->values;
if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices;
if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets;
delete (CsrMatrix*)mat;
}
#endif
}
/* assign the compressed row indices */
if (a->compressedrow.use) {
cusparsestruct->workVector = new THRUSTARRAY(m);
matstruct->cprowIndices = new THRUSTINTARRAY(m);
matstruct->cprowIndices->assign(ridx,ridx+m);
tmp = m;
} else {
cusparsestruct->workVector = NULL;
matstruct->cprowIndices = NULL;
tmp = 0;
}
ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr);
/* assign the pointer */
cusparsestruct->mat = matstruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
cusparsestruct->nonzerostate = A->nonzerostate;
}
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
/* Thrust functor for zipped traversal: accumulates the first tuple element
   into the second (dst += src). Intended for use with thrust::for_each over
   a zip_iterator, where the tuple elements are references into device arrays. */
struct VecCUDAPlusEquals
{
  template <typename TupleT>
  __host__ __device__
  void operator()(TupleT t)
  {
    thrust::get<1>(t) += thrust::get<0>(t);
  }
};
/* Thrust functor for zipped traversal: copies the first tuple element into
   the second (dst = src). Intended for use with thrust::for_each over a
   zip_iterator, where the tuple elements are references into device arrays. */
struct VecCUDAEquals
{
  template <typename TupleT>
  __host__ __device__
  void operator()(TupleT t)
  {
    auto src = thrust::get<0>(t);
    thrust::get<1>(t) = src;
  }
};
/* Thrust functor for zipped traversal: copies the second tuple element into
   the first (the mirror of VecCUDAEquals). Intended for use with
   thrust::for_each over a zip_iterator of references into device arrays. */
struct VecCUDAEqualsReverse
{
  template <typename TupleT>
  __host__ __device__
  void operator()(TupleT t)
  {
    auto src = thrust::get<1>(t);
    thrust::get<0>(t) = src;
  }
};
/* Per-product scratch data stashed in C->product->data for matrix products
   involving a MATSEQAIJCUSPARSE operand; released by MatDestroy_MatMatCusparse(). */
struct MatMatCusparse {
  PetscBool   cisdense;   /* result C was MATSEQDENSE (host) on entry; convert it back after computing on the GPU */
  PetscScalar *Bt;        /* device buffer holding B^T for ABt/RARt when csrmm (CUDA < 11) cannot transpose B */
  Mat         X;          /* dense intermediate A*B (PtAP) or A*B^t (RARt) */
  PetscBool   reusesym;   /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
  PetscLogDouble flops;   /* flop count estimated in the symbolic phase, logged in the numeric phase */
  CsrMatrix   *Bcsr;      /* view of B with full (uncompressed) row offsets when B uses compressed row storage */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseSpMatDescr_t matSpBDescr; /* sparse descriptor for the Bcsr view above */
  PetscBool             initialized; /* C = alpha op(A) op(B) + beta C */
  hipsparseDnMatDescr_t matBDescr;   /* dense descriptor for B */
  hipsparseDnMatDescr_t matCDescr;   /* dense descriptor for C (or for the intermediate X) */
  PetscInt              Blda,Clda;   /* Record leading dimensions of B and C here to detect changes*/
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  void *dBuffer4;         /* kept alive: needed again by cusparseSpGEMMreuse_compute() in the numeric phase */
  void *dBuffer5;         /* kept alive: needed again by cusparseSpGEMMreuse_copy()/compute() */
#endif
  size_t mmBufferSize;    /* size in bytes of mmBuffer */
  void *mmBuffer;         /* SpMM / SpGEMM compute buffer */
  void *mmBuffer2;        /* SpGEMM WorkEstimation buffer */
  hipsparseSpGEMMDescr_t spgemmDesc; /* SpGEMM descriptor shared between symbolic and numeric phases */
#endif
};
/*
  Destructor registered as C->product->destroy: frees every GPU buffer,
  cuSPARSE descriptor, and the intermediate dense matrix owned by a
  MatMatCusparse scratch structure, then frees the structure itself.
*/
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
  PetscErrorCode ierr;
  MatMatCusparse *mmdata = (MatMatCusparse *)data;
  hipError_t     cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseStatus_t stat;
#endif

  PetscFunctionBegin;
  cerr = hipFree(mmdata->Bt);CHKERRCUDA(cerr); /* Bt may be unset; freeing a null device pointer is a no-op */
  /* NOTE(review): Bcsr->row_offsets can alias Bcusp->rowoffsets_gpu (set in the
     symbolic phase); assumes ~CsrMatrix does not free its members -- confirm */
  delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  /* descriptors/buffers are only created on some code paths, hence the guards */
  if (mmdata->matSpBDescr) { stat = hipsparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->matBDescr)   { stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->matCDescr)   { stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->spgemmDesc)  { stat = hipsparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat); }
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  if (mmdata->dBuffer4) { cerr = hipFree(mmdata->dBuffer4);CHKERRCUDA(cerr); }
  if (mmdata->dBuffer5) { cerr = hipFree(mmdata->dBuffer5);CHKERRCUDA(cerr); }
#endif
  if (mmdata->mmBuffer)  { cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr); }
  if (mmdata->mmBuffer2) { cerr = hipFree(mmdata->mmBuffer2);CHKERRCUDA(cerr); }
#endif
  ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr); /* X exists only for RARt/PtAP products (see the symbolic phase) */
  ierr = PetscFree(data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);
/*
  Numeric phase of C = op(A) op(B) with A a MATSEQAIJCUSPARSE matrix and B dense.
  The sparse-dense product runs on the GPU via cusparseSpMM (CUDA >= 11) or
  csrmm (older toolkits).  For MATPRODUCT_PtAP / MATPRODUCT_RARt the kernel
  first computes the dense intermediate X = A*B (resp. A*B^t); the remaining
  dense-dense multiply is done afterwards by
  MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private().
*/
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  PetscInt                     m,n,blda,clda;
  PetscBool                    flg,biscuda;
  Mat_SeqAIJCUSPARSE           *cusp;
  hipsparseStatus_t            stat;
  hipsparseOperation_t         opA;
  const PetscScalar            *barray;
  PetscScalar                  *carray;
  PetscErrorCode               ierr;
  MatMatCusparse               *mmdata;
  Mat_SeqAIJCUSPARSEMultStruct *mat;
  CsrMatrix                    *csrmat;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data empty");
  mmdata = (MatMatCusparse*)product->data;
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  /* currently CopyToGpu does not copy if the matrix is bound to CPU
     Instead of silently accepting the wrong answer, I prefer to raise the error */
  if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  /* pick op(A) and the dimensions of the sparse-dense product result (m x n);
     PtAP/RARt compute the same kernel as AB/ABt, targeting the intermediate X */
  switch (product->type) {
  case MATPRODUCT_AB:
  case MATPRODUCT_PtAP:
    mat = cusp->mat;
    opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    /* either apply A with the transpose flag, or use the explicitly formed A^T */
    if (!A->form_explicit_transpose) {
      mat = cusp->mat;
      opA = HIPSPARSE_OPERATION_TRANSPOSE;
    } else {
      ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
      mat  = cusp->matTranspose;
      opA  = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    }
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
  case MATPRODUCT_RARt:
    mat = cusp->mat;
    opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->rmap->n;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csrmat = (CsrMatrix*)mat->mat;
  /* if the user passed a CPU matrix, copy the data to the GPU */
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
  if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
  ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);
  ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
  /* the SpMM output goes into the intermediate X for triple products, into C otherwise */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
  }
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
  /* (re)allocate mmBuffer if not initialized or LDAs are different */
  if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
    size_t mmBufferSize;
    /* recreate dense descriptors whose leading dimension changed since last call */
    if (mmdata->initialized && mmdata->Blda != blda) {stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
    if (!mmdata->matBDescr) {
      stat         = hipsparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Blda = blda;
    }
    if (mmdata->initialized && mmdata->Clda != clda) {stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
    if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
      stat         = hipsparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Clda = clda;
    }
    /* lazily create the sparse descriptor for op(A) if CopyToGPU did not */
    if (!mat->matDescr) {
      stat = hipsparseCreateCsr(&mat->matDescr,
                               csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
                               csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(),
                               csrmat->values->data().get(),
                               HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
                               HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
    }
    stat = hipsparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
                                   mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                                   mmdata->matCDescr,cusparse_scalartype,
                                   cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
    /* grow (never shrink) the workspace */
    if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
      hipError_t cerr;
      cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
      cerr = hipMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
      mmdata->mmBufferSize = mmBufferSize;
    }
    mmdata->initialized = PETSC_TRUE;
  } else {
    /* to be safe, always update pointers of the mats */
    stat = hipsparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
    stat = hipsparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
    stat = hipsparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
  }

  /* do hipsparseSpMM, which supports transpose on B */
  stat = hipsparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
                      mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                      mmdata->matCDescr,cusparse_scalartype,
                      cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
  PetscInt k;
  /* cusparseXcsrmm does not support transpose on B */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    /* explicitly transpose B into the preallocated Bt with a cuBLAS geam */
    hipblasHandle_t cublasv2handle;
    hipblasStatus_t cerr;

    ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
    cerr = cublasXgeam(cublasv2handle,HIPBLAS_OP_T,HIPBLAS_OP_T,
                       B->cmap->n,B->rmap->n,
                       &PETSC_CUSPARSE_ONE ,barray,blda,
                       &PETSC_CUSPARSE_ZERO,barray,blda,
                       mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
    blda = B->cmap->n;
    k    = B->cmap->n;
  } else {
    k    = B->rmap->n;
  }

  /* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
  stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
                           csrmat->num_entries,mat->alpha_one,mat->descr,
                           csrmat->values->data().get(),
                           csrmat->row_offsets->data().get(),
                           csrmat->column_indices->data().get(),
                           mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
                           carray,clda);CHKERRCUSPARSE(stat);
#endif
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
  ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
  /* finish the triple products with a dense-dense multiply: C = B*X (RARt) or C = B^t*X (PtAP) */
  if (product->type == MATPRODUCT_RARt) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  } else if (product->type == MATPRODUCT_PtAP) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
  }
  /* undo the temporary conversions done at the top of this routine */
  if (mmdata->cisdense) {
    ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
  }
  if (!biscuda) {
    ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/*
  Symbolic phase of C = op(A) op(B) with A a MATSEQAIJCUSPARSE matrix and B
  dense: sets the sizes/type of C, allocates the MatMatCusparse scratch data
  (including the B^T buffer on CUDA < 11 and the dense intermediate X for
  PtAP/RARt), and installs the numeric-phase callback.
*/
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product        *product = C->product;
  Mat                A,B;
  PetscInt           m,n;
  PetscBool          cisdense,flg;
  PetscErrorCode     ierr;
  MatMatCusparse     *mmdata;
  Mat_SeqAIJCUSPARSE *cusp;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data not empty");
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  /* result dimensions per product type */
  switch (product->type) {
  case MATPRODUCT_AB:
    m = A->rmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n = B->rmap->n;
    break;
  case MATPRODUCT_PtAP:
    m = B->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_RARt:
    m = B->rmap->n;
    n = B->rmap->n;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
  /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */
  ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr);
  ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr);

  /* product data */
  ierr = PetscNew(&mmdata);CHKERRQ(ierr);
  mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
  /* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    hipError_t cerr = hipMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
  }
#endif
  /* for these products we need intermediate storage */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr);
    ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr);
    if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
      ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr);
    } else {
      ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr);
    }
  }
  C->product->data    = mmdata;
  C->product->destroy = MatDestroy_MatMatCusparse;
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
  PetscFunctionReturn(0);
}
/*
  Numeric phase of the sparse-sparse product C = op(A) op(B), all three
  matrices MATSEQAIJCUSPARSE, using cuSPARSE SpGEMM (CUDA >= 11) or csrgemm
  (older toolkits).  The sparsity pattern and all descriptors/buffers were set
  up by the symbolic phase; this routine only recomputes the numerical values.
*/
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  Mat_SeqAIJCUSPARSE           *Acusp,*Bcusp,*Ccusp;
  Mat_SeqAIJ                   *c = (Mat_SeqAIJ*)C->data;
  Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscBool                    flg;
  PetscErrorCode               ierr;
  hipsparseStatus_t            stat;
  hipError_t                   cerr;
  MatProductType               ptype;
  MatMatCusparse               *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseSpMatDescr_t        BmatSpDescr;
#endif
  hipsparseOperation_t         opA = HIPSPARSE_OPERATION_NON_TRANSPOSE,opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data empty");
  ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for C of type %s",((PetscObject)C)->type_name);
  mmdata = (MatMatCusparse*)C->product->data;
  A = product->A;
  B = product->B;
  if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */
    mmdata->reusesym = PETSC_FALSE;
    Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
    if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
    Cmat = Ccusp->mat;
    if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]);
    Ccsr = (CsrMatrix*)Cmat->mat;
    if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C CSR struct");
    goto finalize;
  }
  if (!c->nz) goto finalize; /* empty result: nothing to compute, just finish the assembly bookkeeping */
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for B of type %s",((PetscObject)B)->type_name);
  if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  if (B->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
  Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
  if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);

  ptype = product->type;
  /* for symmetric operands the transposed products collapse to AB (since spgemm cannot transpose) */
  if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
  if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
  /* transposes are realized through the explicitly stored matTranspose */
  switch (ptype) {
  case MATPRODUCT_AB:
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_AtB:
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_ABt:
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  Cmat = Ccusp->mat;
  if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
  if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
  if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C mult struct for product type %s",MatProductTypes[ptype]);
  Acsr = (CsrMatrix*)Amat->mat;
  Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */
  Ccsr = (CsrMatrix*)Cmat->mat;
  if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A CSR struct");
  if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B CSR struct");
  if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C CSR struct");
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
  stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  /* SpGEMMreuse path: reuses the structure/buffers computed in the symbolic phase */
  stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB,
                               Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                               cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
                               mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
  stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB,
                               Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                               cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
                               mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
  stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB,
                             Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                             cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#endif
#else
  stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB,
                             Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
                             Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
                             Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
                             Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
  ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  ierr = PetscInfo3(C,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
  ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
  ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
  c->reallocs         = 0;
  C->info.mallocs    += 0;
  C->info.nz_unneeded = 0;
  C->assembled = C->was_assembled = PETSC_TRUE;
  C->num_ass++;
  PetscFunctionReturn(0);
}
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *a,*b,*c;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt i,j,m,n,k;
PetscBool flg;
PetscErrorCode ierr;
hipsparseStatus_t stat;
hipError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
PetscLogDouble flops;
PetscBool biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
int64_t C_num_rows1, C_num_cols1, C_nnz1;
hipsparseSpMatDescr_t BmatSpDescr;
#else
int cnz;
#endif
hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE,opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for B of type %s",((PetscObject)B)->type_name);
a = (Mat_SeqAIJ*)A->data;
b = (Mat_SeqAIJ*)B->data;
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
biscompressed = PETSC_FALSE;
ciscompressed = PETSC_FALSE;
switch (ptype) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
k = A->cmap->n;
Amat = Acusp->mat;
Bmat = Bcusp->mat;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
k = A->rmap->n;
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
k = A->cmap->n;
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
}
/* create cusparse matrix */
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)C->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
c->compressedrow.use = ciscompressed;
if (c->compressedrow.use) { /* if a is in compressed row, than c will be in compressed row format */
c->compressedrow.nrows = a->compressedrow.nrows;
ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr);
ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr);
Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows);
} else {
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Cmat->cprowIndices = NULL;
}
Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = Ccusp->nrows;
Ccsr->num_cols = n;
Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1);
stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! */
thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0);
c->nz = 0;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
goto finalizesym;
}
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
if (!biscompressed) {
Bcsr = (CsrMatrix*)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = Bmat->matDescr;
#endif
} else { /* we need to use row offsets for the full matrix */
CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat;
Bcsr = new CsrMatrix;
Bcsr->num_rows = B->rmap->n;
Bcsr->num_cols = cBcsr->num_cols;
Bcsr->num_entries = cBcsr->num_entries;
Bcsr->column_indices = cBcsr->column_indices;
Bcsr->values = cBcsr->values;
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
mmdata->Bcsr = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (Bcsr->num_rows && Bcsr->num_cols) {
stat = hipsparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries,
Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Bcsr->values->data().get(),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
}
BmatSpDescr = mmdata->matSpBDescr;
#endif
}
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B CSR struct");
/* precompute flops count */
if (ptype == MATPRODUCT_AB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt st = a->i[i];
const PetscInt en = a->i[i+1];
for (j=st; j<en; j++) {
const PetscInt brow = a->j[j];
flops += 2.*(b->i[brow+1] - b->i[brow]);
}
}
} else if (ptype == MATPRODUCT_AtB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt anzi = a->i[i+1] - a->i[i];
const PetscInt bnzi = b->i[i+1] - b->i[i];
flops += (2.*anzi)*bnzi;
}
} else { /* TODO */
flops = 0.;
}
mmdata->flops = flops;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0,
NULL, NULL, NULL,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = hipsparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
{
/* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it.
We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse
*/
void* dBuffer1 = NULL;
void* dBuffer2 = NULL;
void* dBuffer3 = NULL;
/* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */
size_t bufferSize1 = 0;
size_t bufferSize2 = 0;
size_t bufferSize3 = 0;
size_t bufferSize4 = 0;
size_t bufferSize5 = 0;
/*----------------------------------------------------------------------*/
/* ask bufferSize1 bytes for external memory */
stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize1, NULL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void**) &dBuffer1, bufferSize1);CHKERRCUDA(cerr);
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize1, dBuffer1);CHKERRCUSPARSE(stat);
/*----------------------------------------------------------------------*/
stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void**) &dBuffer2, bufferSize2);CHKERRCUDA(cerr);
cerr = hipMalloc((void**) &dBuffer3, bufferSize3);CHKERRCUDA(cerr);
cerr = hipMalloc((void**) &mmdata->dBuffer4, bufferSize4);CHKERRCUDA(cerr);
stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4);CHKERRCUSPARSE(stat);
cerr = hipFree(dBuffer1);CHKERRCUDA(cerr);
cerr = hipFree(dBuffer2);CHKERRCUDA(cerr);
/*----------------------------------------------------------------------*/
/* get matrix C non-zero entries C_nnz1 */
stat = hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
c->nz = (PetscInt) C_nnz1;
/* allocate matrix C */
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
/* update matC with the new pointers */
stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
Ccsr->values->data().get());CHKERRCUSPARSE(stat);
/*----------------------------------------------------------------------*/
stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize5, NULL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void**) &mmdata->dBuffer5, bufferSize5);CHKERRCUDA(cerr);
stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize5, mmdata->dBuffer5);CHKERRCUSPARSE(stat);
cerr = hipFree(dBuffer3);CHKERRCUDA(cerr);
stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufferSize4/1024,bufferSize5/1024);CHKERRQ(ierr);
}
#else // ~PETSC_PKG_CUDA_VERSION_GE(11,4,0)
size_t bufSize2;
/* ask bufferSize bytes for external memory */
stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr);
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat);
/* ask bufferSize again bytes for external memory */
stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat);
/* The CUSPARSE documentation is not clear, nor the API
We need both buffers to perform the operations properly!
mmdata->mmBuffer2 does not appear anywhere in the compute/copy API
it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address
is stored in the descriptor! What a messy API... */
cerr = hipMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr);
/* compute the intermediate product of A * B */
stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
/* get matrix C non-zero entries C_nnz1 */
stat = hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
c->nz = (PetscInt) C_nnz1;
ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
Ccsr->values->data().get());CHKERRCUSPARSE(stat);
stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#endif
#else
stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
stat = hipsparseXcsrgemmNnz(Ccusp->handle, opA, opB,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat);
c->nz = cnz;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when
D is NULL, despite the fact that CUSPARSE documentation claims it is supported! */
stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalizesym:
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
PetscInt *d_i = c->i;
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = hipMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
PetscInt *d_i = c->i;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = hipMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (ciscompressed) { /* need to expand host row offsets */
PetscInt r = 0;
c->i[0] = 0;
for (k = 0; k < c->compressedrow.nrows; k++) {
const PetscInt next = c->compressedrow.rindex[k];
const PetscInt old = c->compressedrow.i[k];
for (; r < next; r++) c->i[r+1] = old;
}
for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows];
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (k = 0; k < m; k++) {
const PetscInt nn = c->i[k+1] - c->i[k];
c->ilen[k] = c->imax[k] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
Ccsr->num_entries = c->nz;
C->nonzerostate++;
ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = C->nonzerostate;
C->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
C->preallocated = PETSC_TRUE;
C->assembled = PETSC_FALSE;
C->was_assembled = PETSC_FALSE;
if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
mmdata->reusesym = PETSC_TRUE;
C->offloadmask = PETSC_OFFLOAD_GPU;
}
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);
/* handles sparse or dense B */
/* Choose the symbolic product implementation for a SeqAIJCUSPARSE matrix product.
   Handles B either sparse (SEQAIJCUSPARSE) or dense (SEQDENSE); dispatches to the GPU
   backends when the operands are not bound to the CPU, honours the per-product
   -mat*_backend_cpu runtime options, and otherwise falls back to the SeqAIJ CPU path. */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
Mat_Product *product = mat->product;
PetscErrorCode ierr;
PetscBool isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;
PetscFunctionBegin;
MatCheckProduct(mat,1);
/* detect the type of B; a matrix bound to the CPU disables the GPU backend */
ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
if (!product->A->boundtocpu && !product->B->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
}
if (product->type == MATPRODUCT_ABC) {
Ciscusp = PETSC_FALSE;
if (!product->C->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
}
}
if (Biscusp && Ciscusp) { /* we can always select the CPU backend */
PetscBool usecpu = PETSC_FALSE;
/* query the command-line option matching this product type (legacy MatXXX names
   when api_user is set, MatProduct names otherwise) to let the user force the CPU path */
switch (product->type) {
case MATPRODUCT_AB:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMult","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matmatmult_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AB","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_ab_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
case MATPRODUCT_AtB:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatTransposeMatMult","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-mattransposematmult_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AtB","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_atb_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
case MATPRODUCT_PtAP:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_PtAP","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_ptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
case MATPRODUCT_RARt:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatRARt","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matrart_backend_cpu","Use CPU code","MatRARt",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_RARt","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_rart_backend_cpu","Use CPU code","MatRARt",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
case MATPRODUCT_ABC:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMatMult","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matmatmatmult_backend_cpu","Use CPU code","MatMatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_ABC","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_abc_backend_cpu","Use CPU code","MatMatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
default:
break;
}
if (usecpu) Biscusp = Ciscusp = PETSC_FALSE; /* pretend B/C are not CUSPARSE so the CPU branch below is taken */
}
/* dispatch */
if (isdense) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
if (product->A->boundtocpu) {
ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
} else {
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
}
break;
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else if (Biscusp && Ciscusp) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
break;
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
case MATPRODUCT_ABC:
/* composite products are built from the basic ones */
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else { /* fallback for AIJ */
ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/* yy = A*xx; thin wrapper over the common kernel (no add, no transpose, no Hermitian) */
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* zz = A*xx + yy; thin wrapper over the common kernel (no transpose, no Hermitian) */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* yy = A^H*xx; thin wrapper over the common kernel (trans=TRUE, herm=TRUE) */
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* zz = A^H*xx + yy; thin wrapper over the common kernel (trans=TRUE, herm=TRUE) */
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* yy = A^T*xx; thin wrapper over the common kernel (trans=TRUE, herm=FALSE) */
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* y[idx[t]] += x[t] for t in [0,n): one thread per entry of the short vector x.
   Launched below with a 1-D grid of 256-thread blocks sized to cover n, so threads
   whose global id falls past n simply exit. */
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx,const PetscScalar *x,PetscScalar *y)
{
  const int tid = threadIdx.x + blockDim.x*blockIdx.x; /* flat global thread id */
  if (tid >= n) return;                                /* guard the grid tail */
  y[idx[tid]] += x[tid];
}
/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op.
   Common worker behind all the MatMult*_SeqAIJCUSPARSE front-ends.
   - yy may be NULL (plain multiply) or equal to zz (in-place add).
   - When the matrix stores compressed rows (zero rows dropped), a work vector holds
     the short product (non-transpose case) or the short input (transpose case), and
     the result is scattered/gathered through matstruct->cprowIndices. */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct;
PetscScalar *xarray,*zarray,*dptr,*beta,*xptr;
PetscErrorCode ierr;
hipsparseStatus_t stat;
hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
PetscBool compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
PetscInt nx,ny; /* lengths of x and y as seen by the SpMV y = op(A) x */
#endif
PetscFunctionBegin;
if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"Hermitian and not transpose not supported");
if (!a->nonzerorowcnt) {
/* empty matrix: result is just beta*y (0 or a copy of yy) */
if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);}
else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);}
PetscFunctionReturn(0);
}
/* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!trans) {
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
} else {
if (herm || !A->form_explicit_transpose) {
/* let cuSPARSE apply the (conjugate) transpose on the fly */
opA = herm ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE;
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
} else {
/* use (and build on demand) an explicitly stored transpose */
if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);}
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
}
}
/* Does the matrix use compressed rows (i.e., drop zero rows)? */
compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;
try {
ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so need to get uptodate zarray on GPU */
else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
/* z = A x + beta y.
If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
*/
xptr = xarray;
dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
allocated to accommodate different uses. So we get the length info directly from mat.
*/
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_cols;
ny = mat->num_rows;
}
#endif
} else {
/* z = A^T x + beta y
If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
Note A^Tx is of full length, so we set beta to 1.0 if y exists.
*/
xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
dptr = zarray;
beta = yy ? matstruct->beta_one : matstruct->beta_zero;
if (compressed) { /* Scatter x to work vector */
thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
thrust::for_each(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAEqualsReverse());
}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_rows;
ny = mat->num_cols;
}
#endif
}
/* csr_spmv does y = alpha op(A) x + beta y */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on hipsparseOperation_t has changed and PETSc has not been updated accordingly");
if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
/* create cached dense-vector descriptors plus the SpMV work buffer, once per op */
hipError_t cerr;
stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = hipsparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one,
matstruct->matDescr,
matstruct->cuSpMV[opA].vecXDescr, beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
&matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
} else {
/* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
}
stat = hipsparseSpMV(cusparsestruct->handle, opA,
matstruct->alpha_one,
matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTransposeForMult() */
matstruct->cuSpMV[opA].vecXDescr,
beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
/* pre-11.0 legacy csrmv interface */
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
stat = cusparse_csr_spmv(cusparsestruct->handle, opA,
mat->num_rows, mat->num_cols,
mat->num_entries, matstruct->alpha_one, matstruct->descr,
mat->values->data().get(), mat->row_offsets->data().get(),
mat->column_indices->data().get(), xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
} else {
if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
stat = cusparse_hyb_spmv(cusparsestruct->handle, opA,
matstruct->alpha_one, matstruct->descr, hybMat,
xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
}
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
if (yy) { /* MatMultAdd: zz = A*xx + yy */
if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
} else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
} else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
}
/* ScatterAdd the result from work vector into the full vector when A is compressed */
if (compressed) {
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered)
and in the destructor of the scope, it will call hipStreamSynchronize() on this stream. One has to store all events to
prevent that. So I just add a ScatterAdd kernel.
*/
#if 0
thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
thrust::async::for_each(thrust::hip::par.on(cusparsestruct->stream),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAPlusEquals());
#else
PetscInt n = matstruct->cprowIndices->size();
hipLaunchKernelGGL(( ScatterAdd), dim3((n+255)/256),dim3(256),0,PetscDefaultCudaStream, n,matstruct->cprowIndices->data().get(),cusparsestruct->workVector->data().get(),zarray);
#endif
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
} else {
/* transpose case: cuSPARSE already produced the full-length z; just add yy if it was not folded in via beta */
if (yy && yy != zz) {
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
}
ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
if (yy) {
ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/* zz = A^T*xx + yy; thin wrapper over the common kernel (trans=TRUE, herm=FALSE) */
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Finish assembly via the SeqAIJ path, then drop the cached device-side matrix
   object if the nonzero pattern changed during assembly (it would be stale). */
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
PetscErrorCode ierr;
PetscObjectState onnz = A->nonzerostate; /* snapshot before CPU-side assembly */
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
if (onnz != A->nonzerostate && cusp->deviceMat) {
hipError_t cerr;
ierr = PetscInfo(A,"Destroy device mat since nonzerostate changed\n");CHKERRQ(ierr);
cerr = hipFree(cusp->deviceMat);CHKERRCUDA(cerr);
cusp->deviceMat = NULL;
}
PetscFunctionReturn(0);
}
/* --------------------------------------------------------------------------------*/
/*@
MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
(the default parallel PETSc format). This matrix will ultimately be pushed down
to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameter nz (or the array nnz). By setting these parameters accurately,
performance during matrix assembly can be increased by more than a factor of 50.
Collective
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows)
- nnz - array containing the number of nonzeros in the various rows
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
MatXXXXSetPreallocation() paradigm instead of this routine directly.
[MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
Notes:
If nnz is given then nz is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage), is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either nz or nnz (not both).
Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr); /* sequential matrix: local sizes equal global sizes */
ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
/* preallocate directly on the SeqAIJ (CPU) representation; GPU copies are built lazily */
ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Release GPU-side storage (plain or triangular-factor flavour depending on factortype),
   detach all composed function pointers, then destroy the underlying SeqAIJ matrix. */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
} else {
/* factored matrices carry triangular-factor data in spptr instead */
ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
}
/* composing NULL removes the previously composed methods */
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatConvert_seqaijcusparse_hypre_C",NULL);CHKERRQ(ierr);
ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool);
/* Duplicate via the CPU SeqAIJ path, then convert the copy in place back to
   SEQAIJCUSPARSE so B carries the GPU implementation like A does. */
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Y += a*X entirely on the GPU when possible.
   - If the two matrices do not share this implementation, fall back to the CPU MatAXPY_SeqAIJ.
   - SAME_NONZERO_PATTERN (given or detected by comparing the CSR index arrays) reduces to a
     dense BLAS axpy on the value arrays.
   - SUBSET_NONZERO_PATTERN uses cuSPARSE csrgeam (Y = a*X + 1*Y) with Y's own arrays as output.
   - Anything else goes through the CPU fallback after invalidating Y's cached transpose. */
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
PetscErrorCode ierr;
Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data;
Mat_SeqAIJCUSPARSE *cy;
Mat_SeqAIJCUSPARSE *cx;
PetscScalar *ay;
const PetscScalar *ax;
CsrMatrix *csry,*csrx;
PetscFunctionBegin;
cy = (Mat_SeqAIJCUSPARSE*)Y->spptr;
cx = (Mat_SeqAIJCUSPARSE*)X->spptr;
if (X->ops->axpy != Y->ops->axpy) {
/* mixed implementations: do the update on the CPU */
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* if we are here, it means both matrices are bound to GPU */
ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr);
if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_GPU,"only MAT_CUSPARSE_CSR supported");
if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_GPU,"only MAT_CUSPARSE_CSR supported");
csry = (CsrMatrix*)cy->mat->mat;
csrx = (CsrMatrix*)cx->mat->mat;
/* see if we can turn this into a cublas axpy */
if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
/* same nnz count: compare the CSR index arrays on the device to detect identical patterns */
bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin());
if (eq) {
eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin());
}
if (eq) str = SAME_NONZERO_PATTERN;
}
/* spgeam is buggy with one column */
if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;
if (str == SUBSET_NONZERO_PATTERN) {
hipsparseStatus_t stat;
PetscScalar b = 1.0; /* coefficient of Y in Y = a*X + b*Y */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
size_t bufferSize;
void *buffer;
hipError_t cerr;
#endif
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
/* a and b live on the host, so switch the pointer mode for the geam call */
stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
cerr = hipFree(buffer);CHKERRCUDA(cerr);
#else
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
/* restore the pointer mode expected by the rest of the code */
stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else if (str == SAME_NONZERO_PATTERN) {
/* identical patterns: the update is a dense axpy on the nonzero value arrays */
hipblasHandle_t cublasv2handle;
hipblasStatus_t berr;
PetscBLASInt one = 1, bnz = 1;
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr);
ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else {
/* different patterns: CPU fallback */
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/* MatScale for MATSEQAIJCUSPARSE: Y <- a*Y by scaling the stored nonzeros on
   the device with a single BLAS scal call on the CSR values array. */
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y,PetscScalar a)
{
  PetscErrorCode  ierr;
  Mat_SeqAIJ      *aij = (Mat_SeqAIJ*)Y->data;
  PetscScalar     *val;
  hipblasHandle_t blashandle;
  hipblasStatus_t berr;
  PetscBLASInt    inc = 1, nz = 1;

  PetscFunctionBegin;
  /* device pointer to the CSR values; this also flags the data valid on the GPU */
  ierr = MatSeqAIJCUSPARSEGetArray(Y,&val);CHKERRQ(ierr);
  ierr = PetscCUBLASGetHandle(&blashandle);CHKERRQ(ierr);
  ierr = PetscBLASIntCast(aij->nz,&nz);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  berr = cublasXscal(blashandle,nz,&a,val,inc);CHKERRCUBLAS(berr);
  ierr = PetscLogGpuFlops(nz);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSERestoreArray(Y,&val);CHKERRQ(ierr);
  /* any cached diagonal is stale after scaling */
  ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Zero all stored values of A: the device CSR values (and the cached transpose
   values, if one exists) via thrust::fill, and the host value array. The
   offload mask is set to BOTH only when the device values were actually
   zeroed, so host/device copies stay consistent. */
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;
  PetscBool both = PETSC_FALSE;
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr;
    if (spptr->mat) {
      CsrMatrix* matrix = (CsrMatrix*)spptr->mat->mat;
      if (matrix->values) {
        both = PETSC_TRUE; /* device values zeroed too -> OFFLOAD_BOTH below */
        thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
      }
    }
    if (spptr->matTranspose) { /* keep cached transpose consistent */
      CsrMatrix* matrix = (CsrMatrix*)spptr->matTranspose->mat;
      if (matrix->values) {
        thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
      }
    }
  }
  //ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr);
  /* zero the host copy of the values (a->i[nrows] == number of stored nonzeros) */
  ierr = PetscArrayzero(a->a,a->i[A->rmap->n]);CHKERRQ(ierr);
  ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr);
  if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
  else A->offloadmask = PETSC_OFFLOAD_CPU;
  PetscFunctionReturn(0);
}
/* Switch the operation table of A between the host (SeqAIJ) and device
   (CUSPARSE) implementations.
   flg == PETSC_TRUE : bind to CPU — copy data back from the GPU and revert all
                       ops and composed methods to the plain SeqAIJ versions.
   flg == PETSC_FALSE: install the CUSPARSE versions.
   Factored matrices are left untouched. The two branches must stay in sync:
   every op/composed function set in one branch has a counterpart in the other. */
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg)
{
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0);
  if (flg) {
    /* make sure the host copy is current before handing control to CPU kernels */
    ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
    A->ops->scale = MatScale_SeqAIJ;
    A->ops->axpy = MatAXPY_SeqAIJ;
    A->ops->zeroentries = MatZeroEntries_SeqAIJ;
    A->ops->mult = MatMult_SeqAIJ;
    A->ops->multadd = MatMultAdd_SeqAIJ;
    A->ops->multtranspose = MatMultTranspose_SeqAIJ;
    A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ;
    A->ops->multhermitiantranspose = NULL;
    A->ops->multhermitiantransposeadd = NULL;
    A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ;
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
  } else {
    A->ops->scale = MatScale_SeqAIJCUSPARSE;
    A->ops->axpy = MatAXPY_SeqAIJCUSPARSE;
    A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE;
    A->ops->mult = MatMult_SeqAIJCUSPARSE;
    A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE;
    A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE;
    A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;
    A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE;
    A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
    A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE;
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
  }
  A->boundtocpu = flg;
  a->inode.use = flg; /* inode optimization only makes sense on the CPU path */
  PetscFunctionReturn(0);
}
/* Convert a MATSEQAIJ matrix to MATSEQAIJCUSPARSE. On first conversion this
   allocates the per-matrix sparse-library context (library handle on the
   default stream, storage format, and — for CUDA 11+ — algorithm selections),
   switches the default vector type to CUDA, and installs the CUSPARSE-specific
   methods via MatBindToCPU_SeqAIJCUSPARSE(.,PETSC_FALSE). */
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat)
{
  PetscErrorCode ierr;
  hipsparseStatus_t stat;
  Mat B;
  PetscFunctionBegin;
  ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr);
  } else if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  B = *newmat;
  ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
  ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);
  if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
    if (B->factortype == MAT_FACTOR_NONE) {
      /* regular matrix: full multiply context */
      Mat_SeqAIJCUSPARSE *spptr;
      ierr = PetscNew(&spptr);CHKERRQ(ierr);
      stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
      stat = hipsparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
      spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
      spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */
#else
      spptr->spmvAlg = HIPSPARSE_CSRMV_ALG1; /* default, since we only support csr */
#endif
      spptr->spmmAlg = HIPSPARSE_CSRMM_ALG1; /* default, only support column-major dense matrix B */
      spptr->csr2cscAlg = HIPSPARSE_CSR2CSC_ALG1;
#endif
      B->spptr = spptr;
    } else {
      /* factored matrix: triangular-factor context */
      Mat_SeqAIJCUSPARSETriFactors *spptr;
      ierr = PetscNew(&spptr);CHKERRQ(ierr);
      stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
      stat = hipsparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
      B->spptr = spptr;
    }
    B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
  }
  B->ops->assemblyend    = MatAssemblyEnd_SeqAIJCUSPARSE;
  B->ops->destroy        = MatDestroy_SeqAIJCUSPARSE;
  B->ops->setoption      = MatSetOption_SeqAIJCUSPARSE;
  B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
  B->ops->bindtocpu      = MatBindToCPU_SeqAIJCUSPARSE;
  B->ops->duplicate      = MatDuplicate_SeqAIJCUSPARSE;
  ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
#if defined(PETSC_HAVE_HYPRE)
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqaijcusparse_hypre_C",MatConvert_AIJ_HYPRE);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
/* MATSEQAIJCUSPARSE constructor: build a plain SeqAIJ matrix, then convert it
   in place to the CUSPARSE type. */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*MC
MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.
   A matrix type whose data resides on Nvidia GPUs. These matrices can be in either
CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.
Options Database Keys:
+ -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
. -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
Level: beginner
.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat,MatFactorType,Mat*);
/* Register the CUSPARSE-backed solver types with the MatSolverType registry:
   the band LU solver (on plain MATSEQAIJ) and the LU/Cholesky/ILU/ICC
   factorizations on MATSEQAIJCUSPARSE. */
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse_band);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Tear down the full Mat_SeqAIJCUSPARSE context: the multiply structs for the
   matrix and its cached transpose, the thrust work/index arrays, the library
   handle, and finally the struct itself. */
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
  PetscErrorCode    ierr;
  hipsparseStatus_t stat;
  Mat_SeqAIJCUSPARSE *cusp = *cusparsestruct;

  PetscFunctionBegin;
  if (cusp) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->mat,cusp->format);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
    delete cusp->workVector;
    delete cusp->rowoffsets_gpu;
    delete cusp->cooPerm;
    delete cusp->cooPerm_a;
    delete cusp->csr2csc_i;
    if (cusp->handle) {stat = hipsparseDestroy(cusp->handle);CHKERRCUSPARSE(stat);}
    ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Free the three device arrays of a CsrMatrix (values, column indices, row
   offsets) and the struct itself; the caller's pointer is reset to NULL. */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  CsrMatrix *m = *mat;

  PetscFunctionBegin;
  if (m) {
    delete m->values;
    delete m->column_indices;
    delete m->row_offsets;
    delete m;
    *mat = NULL;
  }
  PetscFunctionReturn(0);
}
/* Release one triangular-factor struct: matrix descriptor, analysis info,
   CSR storage, and the device/pinned scratch buffers; then free the struct. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  hipsparseStatus_t stat;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  if (*trifactor) {
    Mat_SeqAIJCUSPARSETriFactorStruct *tf = *trifactor;
    if (tf->descr)     {stat = hipsparseDestroyMatDescr(tf->descr);CHKERRCUSPARSE(stat);}
    if (tf->solveInfo) {stat = cusparse_destroy_analysis_info(tf->solveInfo);CHKERRCUSPARSE(stat);}
    ierr = CsrMatrix_Destroy(&tf->csrMat);CHKERRQ(ierr);
    if (tf->solveBuffer)   {hipError_t cerr = hipFree(tf->solveBuffer);CHKERRCUDA(cerr);}
    if (tf->AA_h)          {hipError_t cerr = hipHostFree(tf->AA_h);CHKERRCUDA(cerr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    if (tf->csr2cscBuffer) {hipError_t cerr = hipFree(tf->csr2cscBuffer);CHKERRCUDA(cerr);}
#endif
    ierr = PetscFree(*trifactor);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Release a multiply struct: the matrix storage (CSR, or the legacy HYB/ELL
   object on pre-11 CUDA), the matrix descriptor, compressed-row indices, the
   device-resident scalar constants, and (CUDA 11+) the generic-API SpMat/DnVec
   descriptors and SpMV buffers. *matstruct is set to NULL on exit. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
  CsrMatrix *mat;
  hipsparseStatus_t stat;
  hipError_t err;
  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        /* legacy HYB/ELL storage has its own destructor */
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat);
#endif
      } else {
        mat = (CsrMatrix*)(*matstruct)->mat;
        CsrMatrix_Destroy(&mat);
      }
    }
    if ((*matstruct)->descr) { stat = hipsparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); }
    delete (*matstruct)->cprowIndices;
    /* alpha/beta constants live in device memory so the library can read them */
    if ((*matstruct)->alpha_one) { err=hipFree((*matstruct)->alpha_one);CHKERRCUDA(err); }
    if ((*matstruct)->beta_zero) { err=hipFree((*matstruct)->beta_zero);CHKERRCUDA(err); }
    if ((*matstruct)->beta_one) { err=hipFree((*matstruct)->beta_one);CHKERRCUDA(err); }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
    if (mdata->matDescr) {stat = hipsparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);}
    for (int i=0; i<3; i++) { /* one cached SpMV setup per operation variant */
      if (mdata->cuSpMV[i].initialized) {
        err  = hipFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err);
        stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat);
        stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat);
      }
    }
#endif
    delete *matstruct;
    *matstruct = NULL;
  }
  PetscFunctionReturn(0);
}
/* Release all device data held by the triangular-factor container while
   leaving the container itself allocated, so a new factorization can refill
   it. Safe to call more than once: every freed pointer is reset to NULL. */
PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p* trifactors)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (*trifactors) {
    /* the four factor structs: lower/upper, plain and transposed */
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
    delete (*trifactors)->rpermIndices;
    delete (*trifactors)->cpermIndices;
    delete (*trifactors)->workVector;
    (*trifactors)->rpermIndices = NULL;
    (*trifactors)->cpermIndices = NULL;
    (*trifactors)->workVector = NULL;
    if ((*trifactors)->a_band_d) {hipError_t cerr = hipFree((*trifactors)->a_band_d);CHKERRCUDA(cerr);}
    if ((*trifactors)->i_band_d) {hipError_t cerr = hipFree((*trifactors)->i_band_d);CHKERRCUDA(cerr);}
    /* Bug fix: NULL the freed band pointers, matching the treatment of
       rpermIndices/cpermIndices/workVector above; otherwise a second Reset
       would hipFree() the stale pointers again (double free). */
    (*trifactors)->a_band_d = NULL;
    (*trifactors)->i_band_d = NULL;
    (*trifactors)->init_dev_prop = PETSC_FALSE;
  }
  PetscFunctionReturn(0);
}
/* Fully destroy the triangular-factor container: reset its device data, then
   release the library handle and the struct itself. */
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  PetscErrorCode    ierr;
  hipsparseHandle_t handle;
  hipsparseStatus_t stat;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr);
    handle = (*trifactors)->handle; /* explicit assignment (was assignment-in-condition) */
    if (handle) {
      stat = hipsparseDestroy(handle);CHKERRCUSPARSE(stat);
    }
    ierr = PetscFree(*trifactors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Strict-weak ordering on (i,j) tuples: lexicographic, row first then column.
   Used to sort COO entries into CSR order. */
struct IJCompare
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    return t1.get<0>() < t2.get<0>() || (t1.get<0>() == t2.get<0>() && t1.get<1>() < t2.get<1>());
  }
};
/* Equality of (i,j) tuples: both components must match. */
struct IJEqual
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    return t1.get<0>() == t2.get<0>() && t1.get<1>() == t2.get<1>();
  }
};
/* Binary op used with adjacent_difference: 1 where consecutive values differ,
   0 where they are equal. */
struct IJDiff
{
  __host__ __device__
  inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
  {
    return (t1 != t2) ? 1 : 0;
  }
};
/* Logical OR of two flags, returned as 0/1. */
struct IJSum
{
  __host__ __device__
  inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
  {
    return (t1 || t2) ? 1 : 0;
  }
};
#include <thrust/iterator/discard_iterator.h>
/* Insert/add values into a matrix preallocated with
   MatSetPreallocationCOO_SeqAIJCUSPARSE. v[] is in the caller's original COO
   ordering; cusp->cooPerm maps it into the sorted CSR ordering and, when the
   COO pattern had repeated (i,j) entries, cusp->cooPerm_a identifies which
   unique nonzero each v[i] contributes to (reduced with reduce_by_key).
   v may live on host or device (detected with isCudaMem); v == NULL means
   "no values" (zeroes the matrix for INSERT_VALUES). */
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
  THRUSTARRAY *cooPerm_v = NULL; /* temporary device copy of v when v is a host pointer */
  thrust::device_ptr<const PetscScalar> d_v;
  CsrMatrix *matrix;
  PetscErrorCode ierr;
  PetscInt n;
  PetscFunctionBegin;
  if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct");
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix");
  if (!cusp->cooPerm) { /* not preallocated via COO: just finish assembly */
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  matrix = (CsrMatrix*)cusp->mat->mat;
  if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  if (!v) {
    if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
    goto finalize;
  }
  n = cusp->cooPerm->size();
  if (isCudaMem(v)) {
    d_v = thrust::device_pointer_cast(v);
  } else {
    cooPerm_v = new THRUSTARRAY(n);
    cooPerm_v->assign(v,v+n);
    d_v = cooPerm_v->data();
    ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr);
  }
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
    if (cusp->cooPerm_a) { /* there are repeated entries in d_v[], and we need to add them up */
      THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
      auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
      /* thrust::reduce_by_key(keys_first,keys_last,values_first,keys_output,values_output)
        cooPerm_a = [0,0,1,2,3,4]. The length is n, number of nonzeros in d_v[].
        cooPerm_a is ordered. d_v[i] is the cooPerm_a[i]-th unique nonzero.
      */
      thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
      thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>());
      delete cooPerm_w;
    } else {
      /* all nonzeros in d_v[] are unique entries */
      auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
                                                                matrix->values->begin()));
      auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
                                                                matrix->values->end()));
      thrust::for_each(zibit,zieit,VecCUDAPlusEquals()); /* values[i] += d_v[cooPerm[i]] */
    }
  } else {
    if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
      auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
      thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
    } else {
      auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
                                                                matrix->values->begin()));
      auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
                                                                matrix->values->end()));
      thrust::for_each(zibit,zieit,VecCUDAEquals()); /* values[i] = d_v[cooPerm[i]] */
    }
  }
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalize:
  delete cooPerm_v;
  A->offloadmask = PETSC_OFFLOAD_GPU; /* values were written on the device only */
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr);
  ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
  ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr);
  a->reallocs = 0;
  A->info.mallocs += 0;
  A->info.nz_unneeded = 0;
  A->assembled = A->was_assembled = PETSC_TRUE;
  A->num_ass++;
  PetscFunctionReturn(0);
}
/* Mark the cached transpose of A as out of date; when destroy is PETSC_TRUE
   also free the transpose multiply struct and the csr2csc index map. */
PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
  PetscErrorCode     ierr;
  Mat_SeqAIJCUSPARSE *spcusp = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (spcusp) {
    if (destroy) {
      ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&spcusp->matTranspose,spcusp->format);CHKERRQ(ierr);
      delete spcusp->csr2csc_i;
      spcusp->csr2csc_i = NULL;
    }
    A->transupdated = PETSC_FALSE;
  }
  PetscFunctionReturn(0);
}
#include <thrust/binary_search.h>
/* Preallocate A from a COO description (n entries, row indices coo_i[],
   column indices coo_j[]). On the device: sort the (i,j) pairs into CSR order
   recording the permutation in cusp->cooPerm; collapse repeated pairs,
   building cusp->cooPerm_a (map from sorted COO slot to unique-nonzero index)
   only when duplicates exist; derive the row offsets by binary search. The
   CSR pattern is then mirrored to the host arrays a->i/a->j and the values
   are zero-initialized. The worked example in the comments follows one COO
   input through each step. */
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[])
{
  PetscErrorCode ierr;
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
  PetscInt cooPerm_n, nzr = 0;
  hipError_t cerr;
  PetscFunctionBegin;
  ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
  cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0;
  if (n != cooPerm_n) { /* size changed: discard any previous COO setup */
    delete cusp->cooPerm;
    delete cusp->cooPerm_a;
    cusp->cooPerm = NULL;
    cusp->cooPerm_a = NULL;
  }
  if (n) {
    THRUSTINTARRAY d_i(n);
    THRUSTINTARRAY d_j(n);
    THRUSTINTARRAY ii(A->rmap->n);
    if (!cusp->cooPerm) { cusp->cooPerm = new THRUSTINTARRAY(n); }
    if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); }
    ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
    d_i.assign(coo_i,coo_i+n);
    d_j.assign(coo_j,coo_j+n);
    /* Ex.
      n = 6
      coo_i = [3,3,1,4,1,4]
      coo_j = [3,2,2,5,2,6]
    */
    auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin()));
    auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end()));
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
    thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare()); /* sort by row, then by col */
    *cusp->cooPerm_a = d_i; /* copy the sorted array */
    THRUSTINTARRAY w = d_j;
    /*
      d_i = [1,1,3,3,4,4]
      d_j = [2,2,2,3,5,6]
      cooPerm = [2,4,1,0,3,5]
    */
    auto nekey = thrust::unique(fkey, ekey, IJEqual()); /* unique (d_i, d_j) */
    /*
      d_i = [1,3,3,4,4,x]
                         ^ekey
      d_j = [2,2,3,5,6,x]
                       ^nekey
    */
    if (nekey == ekey) { /* all entries are unique */
      delete cusp->cooPerm_a;
      cusp->cooPerm_a = NULL;
    } else { /* Stefano: I couldn't come up with a more elegant algorithm */
      /* idea: any change in i or j in the (i,j) sequence implies a new nonzero */
      adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff()); /* cooPerm_a: [1,1,3,3,4,4] => [1,0,1,0,1,0]*/
      adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff()); /* w: [2,2,2,3,5,6] => [2,0,0,1,1,1]*/
      (*cusp->cooPerm_a)[0] = 0; /* clear the first entry, though accessing an entry on device implies a hipMemcpy */
      w[0] = 0;
      thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum()); /* cooPerm_a = [0,0,1,1,1,1]*/
      thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>()); /*cooPerm_a=[0,0,1,2,3,4]*/
    }
    thrust::counting_iterator<PetscInt> search_begin(0);
    thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(), /* binary search entries of [0,1,2,3,4,5,6) in ordered array d_i = [1,3,3,4,4], supposing A->rmap->n = 6. */
                        search_begin, search_begin + A->rmap->n, /* return in ii[] the index of last position in d_i[] where value could be inserted without violating the ordering */
                        ii.begin()); /* ii = [0,1,1,3,5,5]. A leading 0 will be added later */
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
    /* replace the host CSR pattern with the one just computed on the device */
    ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr);
    a->singlemalloc = PETSC_FALSE;
    a->free_a = PETSC_TRUE;
    a->free_ij = PETSC_TRUE;
    ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr);
    a->i[0] = 0; /* a->i = [0,0,1,1,3,5,5] */
    cerr = hipMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
    a->nz = a->maxnz = a->i[A->rmap->n];
    a->rmax = 0;
    ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr);
    ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr);
    cerr = hipMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
    if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); }
    if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); }
    for (PetscInt i = 0; i < A->rmap->n; i++) {
      const PetscInt nnzr = a->i[i+1] - a->i[i];
      nzr += (PetscInt)!!(nnzr); /* count nonempty rows */
      a->ilen[i] = a->imax[i] = nnzr;
      a->rmax = PetscMax(a->rmax,nnzr);
    }
    a->nonzerorowcnt = nzr;
    A->preallocated = PETSC_TRUE;
    ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr);
  }
  ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
  /* We want to allocate the CUSPARSE struct for matvec now.
     The code is so convoluted now that I prefer to copy zeros */
  ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr);
  ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr);
  A->offloadmask = PETSC_OFFLOAD_CPU;
  A->nonzerostate++;
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
  A->assembled = PETSC_FALSE;
  A->was_assembled = PETSC_FALSE;
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSEGetIJ - returns the device row storage i and j indices for MATSEQAIJCUSPARSE matrices.
Not collective
Input Parameters:
+ A - the matrix
- compressed - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be always returned in compressed form
Output Parameters:
+ ia - the CSR row pointers
- ja - the CSR column indices
Level: developer
Notes:
When compressed is true, the CSR structure does not contain empty rows
.seealso: MatSeqAIJCUSPARSERestoreIJ(), MatSeqAIJCUSPARSEGetArrayRead()
@*/
/*
   Returns device pointers to the CSR row offsets (i) and column indices (j).
   Bug fix over the previous version: the early return used (!i || !j), which
   bailed out as soon as EITHER output pointer was NULL — making the individual
   "if (i)"/"if (j)" guards below unreachable dead code and preventing callers
   from requesting only one of the two arrays (MatSeqAIJCUSPARSERestoreIJ
   already handles i and j independently). We now return early only when both
   are NULL; callers passing both pointers see identical behavior.
*/
PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int** i, const int **j)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  if (!i && !j) PetscFunctionReturn(0); /* nothing requested */
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (i) {
    if (!compressed && a->compressedrow.use) { /* need full row offset */
      if (!cusp->rowoffsets_gpu) { /* lazily build and cache the uncompressed row offsets on the device */
        cusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
        cusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
        ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
      }
      *i = cusp->rowoffsets_gpu->data().get();
    } else *i = csr->row_offsets->data().get();
  }
  if (j) *j = csr->column_indices->data().get();
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSERestoreIJ - restore the device row storage i and j indices obtained with MatSeqAIJCUSPARSEGetIJ()
Not collective
Input Parameters:
+ A - the matrix
- compressed - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be always returned in compressed form
Output Parameters:
+ ia - the CSR row pointers
- ja - the CSR column indices
Level: developer
.seealso: MatSeqAIJCUSPARSEGetIJ()
@*/
/* Counterpart of MatSeqAIJCUSPARSEGetIJ: nothing to release on the device;
   just invalidate the caller's pointers. */
PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool compressed, const int** i, const int **j)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (i) {*i = NULL;}
  if (j) {*j = NULL;}
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Notes: may trigger host-device copies if up-to-date matrix data is on host
.seealso: MatSeqAIJCUSPARSEGetArray(), MatSeqAIJCUSPARSEGetArrayWrite(), MatSeqAIJCUSPARSERestoreArrayRead()
@*/
/* Read-only device pointer to the CSR values. Triggers a host-to-device copy
   if the device copy is stale; does not change offloadmask or object state. */
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix *csr;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from MatSeqAIJCUSPARSEGetArrayRead()
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
.seealso: MatSeqAIJCUSPARSEGetArrayRead()
@*/
/* Release the read-only device pointer; no state bump since the data was not
   modified. */
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  *a = NULL;
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Notes: may trigger host-device copies if up-to-date matrix data is on host
.seealso: MatSeqAIJCUSPARSEGetArrayRead(), MatSeqAIJCUSPARSEGetArrayWrite(), MatSeqAIJCUSPARSERestoreArray()
@*/
/* Read-write device pointer to the CSR values. Syncs host->device first, then
   marks the GPU copy as the valid one and invalidates the cached transpose
   (its values would no longer match after a write). */
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix *csr;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  A->offloadmask = PETSC_OFFLOAD_GPU;
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from MatSeqAIJCUSPARSEGetArray()
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
.seealso: MatSeqAIJCUSPARSEGetArray()
@*/
/* Release the read-write device pointer; bumps the object state since the
   values may have been modified. */
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar** a)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  *a = NULL;
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Notes: does not trigger host-device copies and flags data validity on the GPU
.seealso: MatSeqAIJCUSPARSEGetArray(), MatSeqAIJCUSPARSEGetArrayRead(), MatSeqAIJCUSPARSERestoreArrayWrite()
@*/
/* Write-only device pointer to the CSR values. Unlike GetArray/GetArrayRead,
   no host-to-device sync is performed (the current values are assumed to be
   overwritten); the GPU copy is flagged valid and the cached transpose is
   invalidated. */
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix *csr;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  A->offloadmask = PETSC_OFFLOAD_GPU;
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from MatSeqAIJCUSPARSEGetArrayWrite()
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
.seealso: MatSeqAIJCUSPARSEGetArrayWrite()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar** a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
*a = NULL;
PetscFunctionReturn(0);
}
struct IJCompare4
{
__host__ __device__
inline bool operator() (const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct Shift
{
int _shift;
Shift(int shift) : _shift(shift) {}
__host__ __device__
inline int operator() (const int &c)
{
return c + _shift;
}
};
/* merges two SeqAIJCUSPARSE matrices A, B by concatenating their rows. [A';B']' operation in matlab notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp;
Mat_SeqAIJCUSPARSEMultStruct *Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt Annz,Bnnz;
hipsparseStatus_t stat;
PetscInt i,m,n,zero = 0;
hipError_t cerr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidHeaderSpecific(B,MAT_CLASSID,2);
PetscValidPointer(C,4);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
PetscCheckTypeName(B,MATSEQAIJCUSPARSE);
if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",A->rmap->n,B->rmap->n);
if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported");
if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (reuse == MAT_INITIAL_MATRIX) {
m = A->rmap->n;
n = A->cmap->n + B->cmap->n;
ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr);
ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)(*C)->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
Cmat->cprowIndices = NULL;
c->compressedrow.use = PETSC_FALSE;
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Ccusp->nrows = m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = m;
Ccsr->num_cols = n;
stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Annz = (PetscInt)Acsr->column_indices->size();
Bnnz = (PetscInt)Bcsr->column_indices->size();
c->nz = Annz + Bnnz;
Ccsr->row_offsets = new THRUSTINTARRAY32(m+1);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
Ccsr->num_entries = c->nz;
Ccusp->cooPerm = new THRUSTINTARRAY(c->nz);
if (c->nz) {
auto Acoo = new THRUSTINTARRAY32(Annz);
auto Bcoo = new THRUSTINTARRAY32(Bnnz);
auto Ccoo = new THRUSTINTARRAY32(c->nz);
THRUSTINTARRAY32 *Aroff,*Broff;
if (a->compressedrow.use) { /* need full row offset */
if (!Acusp->rowoffsets_gpu) {
Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Aroff = Acusp->rowoffsets_gpu;
} else Aroff = Acsr->row_offsets;
if (b->compressedrow.use) { /* need full row offset */
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Broff = Bcusp->rowoffsets_gpu;
} else Broff = Bcsr->row_offsets;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = hipsparseXcsr2coo(Acusp->handle,
Aroff->data().get(),
Annz,
m,
Acoo->data().get(),
HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseXcsr2coo(Bcusp->handle,
Broff->data().get(),
Bnnz,
m,
Bcoo->data().get(),
HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
/* Issues when using bool with large matrices on SUMMIT 10.2.89 */
auto Aperm = thrust::make_constant_iterator(1);
auto Bperm = thrust::make_constant_iterator(0);
#if PETSC_PKG_CUDA_VERSION_GE(10,0,0)
auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n));
auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n));
#else
/* there are issues instantiating the merge operation using a transform iterator for the columns of B */
auto Bcib = Bcsr->column_indices->begin();
auto Bcie = Bcsr->column_indices->end();
thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n));
#endif
auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz);
auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm));
auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm));
auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm));
auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm));
auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin()));
auto p1 = Ccusp->cooPerm->begin();
auto p2 = Ccusp->cooPerm->begin();
thrust::advance(p2,Annz);
PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4()));
#if PETSC_PKG_CUDA_VERSION_LT(10,0,0)
thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n));
#endif
auto cci = thrust::make_counting_iterator(zero);
auto cce = thrust::make_counting_iterator(c->nz);
#if 0 //Errors on SUMMIT cuda 11.1.0
PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>()));
#else
auto pred = thrust::identity<int>();
PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred));
PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred));
#endif
stat = hipsparseXcoo2csr(Ccusp->handle,
Ccoo->data().get(),
c->nz,
m,
Ccsr->row_offsets->data().get(),
HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
delete wPerm;
delete Acoo;
delete Bcoo;
delete Ccoo;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries,
Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct;
CsrMatrix *CcsrT = new CsrMatrix;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
(*C)->form_explicit_transpose = PETSC_TRUE;
(*C)->transupdated = PETSC_TRUE;
Ccusp->rowoffsets_gpu = NULL;
CmatT->cprowIndices = NULL;
CmatT->mat = CcsrT;
CcsrT->num_rows = n;
CcsrT->num_cols = m;
CcsrT->num_entries = c->nz;
CcsrT->row_offsets = new THRUSTINTARRAY32(n+1);
CcsrT->column_indices = new THRUSTINTARRAY32(c->nz);
CcsrT->values = new THRUSTARRAY(c->nz);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto rT = CcsrT->row_offsets->begin();
if (AT) {
rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT);
thrust::advance(rT,-1);
}
if (BT) {
auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz));
auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz));
thrust::copy(titb,tite,rT);
}
auto cT = CcsrT->column_indices->begin();
if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT);
if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT);
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
stat = hipsparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(CmatT->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(CmatT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries,
CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
Ccusp->matTranspose = CmatT;
}
}
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
cerr = hipMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
cerr = hipMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (i = 0; i < m; i++) {
const PetscInt nn = c->i[i+1] - c->i[i];
c->ilen[i] = c->imax[i] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
(*C)->nonzerostate++;
ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = (*C)->nonzerostate;
(*C)->preallocated = PETSC_TRUE;
} else {
if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",(*C)->rmap->n,B->rmap->n);
c = (Mat_SeqAIJ*)(*C)->data;
if (c->nz) {
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm");
if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Ccsr = (CsrMatrix*)Ccusp->mat->mat;
if (Acsr->num_entries != (PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size());
if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size());
if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size());
if (Ccsr->num_entries != Acsr->num_entries + Bcsr->num_entries) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries);
if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size());
auto pmid = Ccusp->cooPerm->begin();
thrust::advance(pmid,Acsr->num_entries);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin())));
auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
thrust::for_each(zibait,zieait,VecCUDAEquals());
auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end())));
thrust::for_each(zibbit,ziebit,VecCUDAEquals());
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(*C,PETSC_FALSE);CHKERRQ(ierr);
if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) {
if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct");
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat;
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
(*C)->transupdated = PETSC_TRUE;
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
}
ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr);
(*C)->assembled = PETSC_TRUE;
(*C)->was_assembled = PETSC_FALSE;
(*C)->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
{
PetscErrorCode ierr;
bool dmem;
const PetscScalar *av;
hipError_t cerr;
PetscFunctionBegin;
dmem = isCudaMem(v);
ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr);
if (n && idx) {
THRUSTINTARRAY widx(n);
widx.assign(idx,idx+n);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
THRUSTARRAY *w = NULL;
thrust::device_ptr<PetscScalar> dv;
if (dmem) {
dv = thrust::device_pointer_cast(v);
} else {
w = new THRUSTARRAY(n);
dv = w->data();
}
thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av);
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n));
thrust::for_each(zibit,zieit,VecCUDAEquals());
if (w) {
cerr = hipMemcpy(v,w->data().get(),n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
delete w;
} else {
cerr = hipMemcpy(v,av,n*sizeof(PetscScalar),dmem ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (!dmem) { ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); }
ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
| 6436f7a4383a42df843ef44c7c7ede65b59ac8f7.cu | /*
Defines the basic matrix operations for the AIJ (compressed row)
matrix storage format using the CUSPARSE library,
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#include <thrust/async/for_each.h>
const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0};
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* The following are copied from cusparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in
0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them.
typedef enum {
CUSPARSE_MV_ALG_DEFAULT = 0,
CUSPARSE_COOMV_ALG = 1,
CUSPARSE_CSRMV_ALG1 = 2,
CUSPARSE_CSRMV_ALG2 = 3
} cusparseSpMVAlg_t;
typedef enum {
CUSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0,
CUSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG1) = 1,
CUSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG2) = 2,
CUSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3,
CUSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_CSR_ALG1) = 4,
CUSPARSE_SPMM_ALG_DEFAULT = 0,
CUSPARSE_SPMM_COO_ALG1 = 1,
CUSPARSE_SPMM_COO_ALG2 = 2,
CUSPARSE_SPMM_COO_ALG3 = 3,
CUSPARSE_SPMM_COO_ALG4 = 5,
CUSPARSE_SPMM_CSR_ALG1 = 4,
CUSPARSE_SPMM_CSR_ALG2 = 6,
} cusparseSpMMAlg_t;
typedef enum {
CUSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministc
CUSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministc
} cusparseCsr2CscAlg_t;
*/
const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "cusparseSpMVAlg_t","CUSPARSE_",0};
const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","cusparseSpMMAlg_t","CUSPARSE_SPMM_",0};
const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! We created one*/,"ALG1","ALG2","cusparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0};
#endif
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat);
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure);
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat,PetscScalar);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool);
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat,PetscBool);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]);
PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream)
{
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
cusparsestruct->stream = stream;
stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle)
{
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
if (cusparsestruct->handle != handle) {
if (cusparsestruct->handle) {
stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat);
}
cusparsestruct->handle = handle;
}
stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscBool flg;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg || !cusparsestruct) PetscFunctionReturn(0);
if (cusparsestruct->handle) cusparsestruct->handle = 0;
PetscFunctionReturn(0);
}
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSE;
PetscFunctionReturn(0);
}
/*MC
MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported
algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer
performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the
CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
algorithms are not recommended. This class does NOT support direct solver operations.
Level: beginner
.seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
PetscErrorCode ierr;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
(*B)->factortype = ftype;
ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE;
ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILUDT]);CHKERRQ(ierr);
} else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");
ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
(*B)->canuseordering = PETSC_TRUE;
ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT:
cusparsestruct->format = format;
break;
case MAT_CUSPARSE_ALL:
cusparsestruct->format = format;
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
/*@
MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
operation. Only the MatMult operation can use different GPU storage formats
for MPIAIJCUSPARSE matrices.
Not Collective
Input Parameters:
+ A - Matrix of type SEQAIJCUSPARSE
. op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
- format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)
Output Parameter:
Level: intermediate
.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID,1);
ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A,MatOption op,PetscBool flg)
{
PetscErrorCode ierr;
PetscFunctionBegin;
switch (op) {
case MAT_FORM_EXPLICIT_TRANSPOSE:
/* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */
if (A->form_explicit_transpose && !flg) {ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);}
A->form_explicit_transpose = flg;
break;
default:
ierr = MatSetOption_SeqAIJ(A,op,flg);CHKERRQ(ierr);
break;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS isrow = b->row,iscol = b->col;
PetscBool row_identity,col_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (row_identity && col_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Process runtime options for a SeqAIJCUSPARSE matrix: the cuSPARSE storage
   format used for SpMV / triangular solves and, for CUDA >= 11, the cuSPARSE
   algorithm choices for SpMV, SpMM and CSR->CSC conversion.  Factored
   matrices (A->factortype != MAT_FACTOR_NONE) take no options here. */
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
  PetscErrorCode           ierr;
  MatCUSPARSEStorageFormat format;
  PetscBool                flg;
  Mat_SeqAIJCUSPARSE       *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
  if (A->factortype == MAT_FACTOR_NONE) {
    /* -mat_cusparse_mult_storage_format affects only the SpMV path ... */
    ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);}
    /* ... while -mat_cusparse_storage_format sets SpMV and TriSolve together */
    ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)",
                            "cusparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr);
    /* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[].
       The hard-coded positions (2, 4, 1 below) guard against cuSPARSE renumbering its enums between releases. */
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
    if (flg && CUSPARSE_SPMV_CSR_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
#else
    if (flg && CUSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
#endif
    ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)",
                            "cusparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr);
    if (flg && CUSPARSE_SPMM_CSR_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
    ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices",
                            "cusparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr);
    if (flg && CUSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
#endif
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Symbolic ILU factorization for SeqAIJCUSPARSE: invalidate any stale GPU
   triangular factors, run the host SeqAIJ symbolic phase, and route the
   numeric phase to the CUSPARSE implementation. */
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *gpuFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  /* previously built device-side factors (if any) no longer match B */
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&gpuFactors);CHKERRQ(ierr);
  /* symbolic work happens on the host */
  ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  /* numeric factorization will (re)build the GPU factors */
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}
/* Symbolic LU factorization for SeqAIJCUSPARSE.  The symbolic phase is pure
   host work delegated to SeqAIJ; any cached GPU triangular factors are
   dropped first and rebuilt by the CUSPARSE numeric phase installed here. */
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJCUSPARSETriFactors *tri = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&tri);CHKERRQ(ierr);
  ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}
/* Symbolic incomplete-Cholesky (ICC) factorization for SeqAIJCUSPARSE:
   reset cached GPU factors, delegate the symbolic phase to SeqAIJ on the
   host, and install the CUSPARSE Cholesky numeric routine. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *deviceFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&deviceFactors);CHKERRQ(ierr);
  ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}
/* Symbolic Cholesky factorization for SeqAIJCUSPARSE.  Mirrors the ICC
   wrapper: drop stale GPU factors, do host symbolic work via SeqAIJ, and
   point the numeric phase at the CUSPARSE implementation. */
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJCUSPARSETriFactors *tf = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&tf);CHKERRQ(ierr);
  ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}
/* Extract the unit lower triangular ILU factor L from the host factor matrix A
   (PETSc stores L and U packed in one SeqAIJ matrix) and copy it to the GPU as
   a cuSPARSE CSR matrix, running the triangular-solve analysis once.
   On the first call the CSR structure is built; on later calls (host values
   changed, structure unchanged) only the numerical values are re-uploaded.
   No-op unless the host copy is newer than the device copy. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *ai = a->i,*aj = a->j,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiLo, *AjLo;
  PetscInt                          i,nz, nzLower, offset, rowOffset;
  PetscErrorCode                    ierr;
  cudaError_t                       cerr;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
      /* NOTE(review): assumes rows of a->i/a->j store the strict lower part of
         each row first (row 0 of L is just its unit diagonal) — this is the
         layout produced by the SeqAIJ ILU factorization; confirm if reused. */
      nzLower=n+ai[n]-ai[1];
      if (!loTriFactor) {
        PetscScalar *AALo;

        /* pinned (page-locked) host staging buffers for fast host->device copies */
        cerr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);

        /* Allocate Space for the lower triangular matrix */
        cerr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr);

        /* Fill the lower triangular matrix: row 0 is only the implicit unit diagonal */
        AiLo[0] = (PetscInt) 0;
        AiLo[n] = nzLower;
        AjLo[0] = (PetscInt) 0;
        AALo[0] = (MatScalar) 1.0;
        v       = aa;
        vi      = aj;
        offset  = 1;
        rowOffset= 1;
        for (i=1; i<n; i++) {
          nz = ai[i+1] - ai[i];
          /* additional 1 for the term on the diagonal */
          AiLo[i]    = rowOffset;
          rowOffset += nz+1;

          /* copy the strict lower entries of row i, then append the unit diagonal */
          ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr);
          ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr);

          offset      += nz;
          AjLo[offset] = (PetscInt) i;
          AALo[offset] = (MatScalar) 1.0;
          offset      += 1;

          v  += nz;
          vi += nz;
        }

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
        loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
        /* Create the matrix description */
        stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* csrsv2 (CUDA >= 9) requires GENERAL; fill mode/diag type carry the triangular info */
        stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);

        /* set the operation */
        loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

        /* set the matrix */
        loTriFactor->csrMat = new CsrMatrix;
        loTriFactor->csrMat->num_rows = n;
        loTriFactor->csrMat->num_cols = n;
        loTriFactor->csrMat->num_entries = nzLower;

        loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
        loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);

        loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
        loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);

        loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
        loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* csrsv2 needs an explicit device work buffer sized by the library */
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
                                       loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                       loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                       loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
                                       &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                                 loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                 loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                 loTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 loTriFactor->solveInfo,
                                 loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                                 loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
        /* keep AALo alive: it is reused by the values-only update path below */
        loTriFactor->AA_h = AALo;
        cerr = cudaFreeHost(AiLo);CHKERRCUDA(cerr);
        cerr = cudaFreeHost(AjLo);CHKERRCUDA(cerr);
        ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
      } else { /* update values only */
        if (!loTriFactor->AA_h) {
          cerr = cudaMallocHost((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
        }
        /* Fill the lower triangular matrix (same traversal as above, values only) */
        loTriFactor->AA_h[0]  = 1.0;
        v        = aa;
        vi       = aj;
        offset   = 1;
        for (i=1; i<n; i++) {
          nz = ai[i+1] - ai[i];
          ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr);
          offset += nz;
          loTriFactor->AA_h[offset] = 1.0;
          offset += 1;
          v  += nz;
        }
        loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower);
        ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
      }
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}
/* Extract the upper triangular ILU factor U from the host factor matrix A and
   copy it to the GPU as a cuSPARSE CSR matrix, running the triangular-solve
   analysis once.  Companion of MatSeqAIJCUSPARSEBuildILULowerTriMatrix.
   The upper part is walked via a->diag (rows traversed back to front, since
   adiag decreases with i); the stored diagonal entry is inverted so the GPU
   solve uses a non-unit diagonal directly.  First call builds the structure;
   later calls re-upload values only. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *aj = a->j,*adiag = a->diag,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiUp, *AjUp;
  PetscInt                          i,nz, nzUpper, offset;
  PetscErrorCode                    ierr;
  cudaError_t                       cerr;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* next, figure out the number of nonzeros in the upper triangular matrix. */
      nzUpper = adiag[0]-adiag[n];
      if (!upTriFactor) {
        PetscScalar *AAUp;

        /* pinned host staging buffers (cudaMallocHost) for the upload */
        cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);

        /* Allocate Space for the upper triangular matrix */
        cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);

        /* Fill the upper triangular matrix, walking rows from last to first */
        AiUp[0]=(PetscInt) 0;
        AiUp[n]=nzUpper;
        offset = nzUpper;
        for (i=n-1; i>=0; i--) {
          v  = aa + adiag[i+1] + 1;
          vi = aj + adiag[i+1] + 1;

          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i+1]-1;

          /* decrement the offset */
          offset -= (nz+1);

          /* first, set the diagonal elements: store 1/d so the solve multiplies */
          AjUp[offset] = (PetscInt) i;
          AAUp[offset] = (MatScalar)1./v[nz];
          AiUp[i]      = AiUp[i+1] - (nz+1);

          ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr);
          ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr);
        }

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
        upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* csrsv2 (CUDA >= 9) requires GENERAL; triangularity is carried by fill mode */
        stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);

        /* set the operation */
        upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

        /* set the matrix */
        upTriFactor->csrMat = new CsrMatrix;
        upTriFactor->csrMat->num_rows = n;
        upTriFactor->csrMat->num_cols = n;
        upTriFactor->csrMat->num_entries = nzUpper;

        upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);

        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);

        upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
        upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* csrsv2 needs an explicit device work buffer sized by the library */
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
                                       upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                       upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                       upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
                                       &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                                 upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                 upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                 upTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 upTriFactor->solveInfo,
                                 upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                                 upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
        /* keep AAUp alive: reused by the values-only update path below */
        upTriFactor->AA_h = AAUp;
        cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr);
        cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr);
        ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
      } else {
        if (!upTriFactor->AA_h) {
          cerr = cudaMallocHost((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
        }
        /* Fill the upper triangular matrix (values only, same traversal as above) */
        offset = nzUpper;
        for (i=n-1; i>=0; i--) {
          v  = aa + adiag[i+1] + 1;

          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i+1]-1;

          /* decrement the offset */
          offset -= (nz+1);

          /* first, set the diagonal elements */
          upTriFactor->AA_h[offset] = 1./v[nz];
          ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr);
        }
        upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper);
        ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
      }
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}
/* Push the host ILU factors of A to the GPU (lower then upper triangle),
   allocate the shared work vector, and cache the row/column permutations on
   the device when the orderings are not the identity. */
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *aij = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *tf  = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           rowIS = aij->row,colIS = aij->icol;
  PetscBool                    rowIdentity,colIdentity;
  PetscInt                     m = A->rmap->n;

  PetscFunctionBegin;
  if (!tf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
  ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);

  if (!tf->workVector) tf->workVector = new THRUSTARRAY(m);
  tf->nnz        = aij->nz;
  A->offloadmask = PETSC_OFFLOAD_BOTH;

  /* lower triangular indices: upload the row permutation once, if non-trivial */
  ierr = ISIdentity(rowIS,&rowIdentity);CHKERRQ(ierr);
  if (!rowIdentity && !tf->rpermIndices) {
    const PetscInt *rows;

    ierr = ISGetIndices(rowIS,&rows);CHKERRQ(ierr);
    tf->rpermIndices = new THRUSTINTARRAY(m);
    tf->rpermIndices->assign(rows, rows+m);
    ierr = ISRestoreIndices(rowIS,&rows);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(m*sizeof(PetscInt));CHKERRQ(ierr);
  }

  /* upper triangular indices: likewise for the column permutation */
  ierr = ISIdentity(colIS,&colIdentity);CHKERRQ(ierr);
  if (!colIdentity && !tf->cpermIndices) {
    const PetscInt *cols;

    ierr = ISGetIndices(colIS,&cols);CHKERRQ(ierr);
    tf->cpermIndices = new THRUSTINTARRAY(m);
    tf->cpermIndices->assign(cols, cols+m);
    ierr = ISRestoreIndices(colIS,&cols);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(m*sizeof(PetscInt));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Build (or refresh) the two GPU triangular factors used by the ICC/Cholesky
   solve.  Only the upper factor U is stored on the host; the "lower" solve is
   performed as U^T (CUSPARSE_OPERATION_TRANSPOSE) sharing U's CSR pattern,
   with separately scaled values.  The stored diagonal is inverted and the
   off-diagonal entries negated/scaled so the triangular solves need no extra
   diagonal pass.  First call builds both factors; later calls re-upload
   values only. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  PetscErrorCode                    ierr;
  cudaError_t                       cerr;
  PetscInt                          *AiUp, *AjUp;
  PetscScalar                       *AAUp;
  PetscScalar                       *AALo;
  PetscInt                          nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j;
  /* NOTE(review): A->data is read through both Mat_SeqAIJ and Mat_SeqSBAIJ
     views here; this relies on the ICC factor's i/j/a fields lining up in
     both structs — confirm against the factor-matrix layout if modifying. */
  Mat_SeqSBAIJ                      *b = (Mat_SeqSBAIJ*)A->data;
  const PetscInt                    *ai = b->i,*aj = b->j,*vj;
  const MatScalar                   *aa = b->a,*v;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* pinned host staging buffers, used by both the build and update paths */
      cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
      cerr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
      if (!upTriFactor && !loTriFactor) {
        /* Allocate Space for the upper triangular matrix */
        cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);

        /* Fill the upper triangular matrix */
        AiUp[0]=(PetscInt) 0;
        AiUp[n]=nzUpper;
        offset = 0;
        for (i=0; i<n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          vj = aj + ai[i];
          nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */

          /* first, set the diagonal elements: store 1/d in both factors */
          AjUp[offset] = (PetscInt) i;
          AAUp[offset] = (MatScalar)1.0/v[nz];
          AiUp[i]      = offset;
          AALo[offset] = (MatScalar)1.0/v[nz];

          offset+=1;
          if (nz>0) {
            ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr);
            ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
            /* negate off-diagonals; the "lower" copy is additionally scaled by 1/d */
            for (j=offset; j<offset+nz; j++) {
              AAUp[j] = -AAUp[j];
              AALo[j] = AAUp[j]/v[nz];
            }
            offset+=nz;
          }
        }

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
        upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);

        /* set the matrix */
        upTriFactor->csrMat = new CsrMatrix;
        upTriFactor->csrMat->num_rows = A->rmap->n;
        upTriFactor->csrMat->num_cols = A->cmap->n;
        upTriFactor->csrMat->num_entries = a->nz;

        upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);

        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);

        upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
        upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);

        /* set the operation */
        upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
                                       upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                       upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                       upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
                                       &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                                 upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                 upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                 upTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 upTriFactor->solveInfo,
                                 upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                                 upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
        loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        /* fill mode is UPPER on purpose: the lower solve is U^T (see solveOp) */
        stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);

        /* set the operation */
        loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE;

        /* set the matrix: same sparsity pattern as U, different values */
        loTriFactor->csrMat = new CsrMatrix;
        loTriFactor->csrMat->num_rows = A->rmap->n;
        loTriFactor->csrMat->num_cols = A->cmap->n;
        loTriFactor->csrMat->num_entries = a->nz;

        loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
        loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);

        loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);

        loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
                                       loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                       loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                       loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
                                       &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                                 loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                 loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                 loTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 loTriFactor->solveInfo,
                                 loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                                 loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;

        ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr);
        cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr);
        cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr);
      } else {
        /* values-only refresh: recompute the scaled values with the same traversal */
        /* Fill the upper triangular matrix */
        offset = 0;
        for (i=0; i<n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */

          /* first, set the diagonal elements */
          AAUp[offset] = 1.0/v[nz];
          AALo[offset] = 1.0/v[nz];

          offset+=1;
          if (nz>0) {
            ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
            for (j=offset; j<offset+nz; j++) {
              AAUp[j] = -AAUp[j];
              AALo[j] = AAUp[j]/v[nz];
            }
            offset+=nz;
          }
        }
        if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
        if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
        upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
        ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
      }
      cerr = cudaFreeHost(AAUp);CHKERRCUDA(cerr);
      cerr = cudaFreeHost(AALo);CHKERRCUDA(cerr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}
/* Push the host ICC factor of A to the GPU, allocate the shared work vector,
   and — when the ordering is not the identity — upload both the permutation
   and its inverse for use by the permuted triangular solves. */
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *aij = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *tf  = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           permIS = aij->row;
  PetscBool                    isNatural;
  PetscInt                     m = A->rmap->n;

  PetscFunctionBegin;
  if (!tf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
  ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
  if (!tf->workVector) tf->workVector = new THRUSTARRAY(m);
  /* only U is stored; count both triangles but the diagonal once */
  tf->nnz        = (aij->nz-m)*2 + m;
  A->offloadmask = PETSC_OFFLOAD_BOTH;

  /* lower triangular indices */
  ierr = ISIdentity(permIS,&isNatural);CHKERRQ(ierr);
  if (!isNatural) {
    IS             invIS;
    const PetscInt *inv,*fwd;

    ierr = ISInvertPermutation(permIS,PETSC_DECIDE,&invIS);CHKERRQ(ierr);
    ierr = ISGetIndices(invIS,&inv);CHKERRQ(ierr);
    ierr = ISGetIndices(permIS,&fwd);CHKERRQ(ierr);
    tf->rpermIndices = new THRUSTINTARRAY(m);
    tf->rpermIndices->assign(fwd, fwd+m);
    tf->cpermIndices = new THRUSTINTARRAY(m);
    tf->cpermIndices->assign(inv, inv+m);
    ierr = ISRestoreIndices(invIS,&inv);CHKERRQ(ierr);
    ierr = ISDestroy(&invIS);CHKERRQ(ierr);
    ierr = ISRestoreIndices(permIS,&fwd);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(2.*m*sizeof(PetscInt));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Numeric Cholesky/ICC factorization: ensure A's values are on the host, run
   the SeqAIJ numeric phase there, install the solve variants (a faster path
   exists for the natural ordering), and push the factors to the GPU. */
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *bseq = (Mat_SeqAIJ*)B->data;
  IS             perm  = bseq->row;
  PetscBool      naturalOrdering;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
  B->offloadmask = PETSC_OFFLOAD_CPU;

  /* determine which version of MatSolve needs to be used. */
  ierr = ISIdentity(perm,&naturalOrdering);CHKERRQ(ierr);
  if (naturalOrdering) {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
  } else {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
  }
  /* no MatMatSolve support in either case */
  B->ops->matsolve          = NULL;
  B->ops->matsolvetranspose = NULL;

  /* get the triangular factors */
  ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
cusparseStatus_t stat;
cusparseIndexBase_t indexBase;
cusparseMatrixType_t matrixType;
cusparseFillMode_t fillMode;
cusparseDiagType_t diagType;
cudaError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
/* allocate space for the transpose of the lower triangular factor */
ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr);
loTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the lower triangular factor */
matrixType = cusparseGetMatType(loTriFactor->descr);
indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(loTriFactor->descr);
/* Create the matrix description */
stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the lower triangular factor*/
loTriFactorT->csrMat = new CsrMatrix;
loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols;
loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows;
loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries;
loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1);
loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);
/* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
CUSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC, indexBase,
CUSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo,
&loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->solveInfo,
loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;
/*********************************************/
/* Now the Transpose of the Upper Tri Factor */
/*********************************************/
/* allocate space for the transpose of the upper triangular factor */
ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr);
upTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the upper triangular factor */
matrixType = cusparseGetMatType(upTriFactor->descr);
indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(upTriFactor->descr);
/* Create the matrix description */
stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the upper triangular factor*/
upTriFactorT->csrMat = new CsrMatrix;
upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols;
upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows;
upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries;
upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1);
upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);
/* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle,upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
CUSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC, indexBase,
CUSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo,
&upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->solveInfo,
upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
PetscFunctionReturn(0);
}
/* Unary functor that truncates the real part of a PetscScalar to a PetscInt.
   Used with thrust::transform below to convert a permutation array that was
   filled with thrust::sequence (scalar values 0,1,2,...) into an integer
   index array (csr2csc_i).  Callable on host and device. */
struct PetscScalarToPetscInt
{
  __host__ __device__
  PetscInt operator()(PetscScalar s) const /* const: thrust copies functors by value; the call must not mutate state */
  {
    return (PetscInt)PetscRealPart(s);
  }
};
/* Build or refresh an explicit transpose of A on the GPU, stored in
   cusparsestruct->matTranspose, so MatMultTranspose-type operations can run a
   plain (non-transposed) SpMV on A^T.

   No-op when A opts out of explicit transposes (!A->form_explicit_transpose),
   has zero rows or columns, or the transpose is already current (A->transupdated).
   On first call the transpose structure is allocated; on later calls (CSR format
   only) just the numerical values are refreshed via a cached permutation
   (cusparsestruct->csr2csc_i). */
static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTransposeForMult(Mat A)
{
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  cusparseStatus_t             stat;
  cusparseIndexBase_t          indexBase;
  cudaError_t                  err;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  if (!A->form_explicit_transpose || !A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); /* make sure the device copy of A is current first */
  matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
  if (!matstruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing mat struct");
  matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
  if (A->transupdated && !matstructT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing matTranspose struct");
  if (A->transupdated) PetscFunctionReturn(0); /* transpose already current; nothing to do */
  ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  if (cusparsestruct->format != MAT_CUSPARSE_CSR) {
    /* non-CSR formats cannot be value-updated in place; rebuild from scratch */
    ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
  }
  if (!cusparsestruct->matTranspose) { /* create cusparse matrix */
    matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
    stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat);
    indexBase = cusparseGetMatIndexBase(matstruct->descr);
    stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat);
    stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);

    /* set alpha and beta (device-resident scalars used by SpMV with device pointer mode) */
    err  = cudaMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
    err  = cudaMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
    err  = cudaMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
    err  = cudaMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
    err  = cudaMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
    err  = cudaMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);

    if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
      /* A^T in CSR is the CSC of A: dimensions are swapped, nnz is the same */
      CsrMatrix *matrixT      = new CsrMatrix;
      matstructT->mat         = matrixT;
      matrixT->num_rows       = A->cmap->n;
      matrixT->num_cols       = A->rmap->n;
      matrixT->num_entries    = a->nz;
      matrixT->row_offsets    = new THRUSTINTARRAY32(matrixT->num_rows+1);
      matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
      matrixT->values         = new THRUSTARRAY(a->nz);
      /* device copy of A's row offsets, needed as csr2csc input */
      if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n+1); }
      cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      stat = cusparseCreateCsr(&matstructT->matDescr,
                               matrixT->num_rows, matrixT->num_cols, matrixT->num_entries,
                               matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(),
                               matrixT->values->data().get(),
                               CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */
                               indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
    } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
      /* legacy path: HYB -> CSR -> CSC -> HYB via two CSR temporaries */
      CsrMatrix *temp  = new CsrMatrix;
      CsrMatrix *tempT = new CsrMatrix;
      /* First convert HYB to CSR */
      temp->num_rows       = A->rmap->n;
      temp->num_cols       = A->cmap->n;
      temp->num_entries    = a->nz;
      temp->row_offsets    = new THRUSTINTARRAY32(A->rmap->n+1);
      temp->column_indices = new THRUSTINTARRAY32(a->nz);
      temp->values         = new THRUSTARRAY(a->nz);
      stat = cusparse_hyb2csr(cusparsestruct->handle,
                              matstruct->descr, (cusparseHybMat_t)matstruct->mat,
                              temp->values->data().get(),
                              temp->row_offsets->data().get(),
                              temp->column_indices->data().get());CHKERRCUSPARSE(stat);
      /* Next, convert CSR to CSC (i.e. the matrix transpose) */
      /* NOTE(review): for rectangular A the transpose has A->cmap->n rows and A->rmap->n
         columns, but tempT below is sized with rmap/cmap unswapped (num_rows, row_offsets,
         and the csr2hyb dimensions).  This looks correct only for square matrices --
         verify before relying on this legacy path for rectangular A. */
      tempT->num_rows       = A->rmap->n;
      tempT->num_cols       = A->cmap->n;
      tempT->num_entries    = a->nz;
      tempT->row_offsets    = new THRUSTINTARRAY32(A->rmap->n+1);
      tempT->column_indices = new THRUSTINTARRAY32(a->nz);
      tempT->values         = new THRUSTARRAY(a->nz);
      stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows,
                              temp->num_cols, temp->num_entries,
                              temp->values->data().get(),
                              temp->row_offsets->data().get(),
                              temp->column_indices->data().get(),
                              tempT->values->data().get(),
                              tempT->column_indices->data().get(),
                              tempT->row_offsets->data().get(),
                              CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
      /* Last, convert CSC to HYB */
      cusparseHybMat_t hybMat;
      stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
      cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
        CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
      stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n,
                              matstructT->descr, tempT->values->data().get(),
                              tempT->row_offsets->data().get(),
                              tempT->column_indices->data().get(),
                              hybMat, 0, partition);CHKERRCUSPARSE(stat);
      /* assign the pointer */
      matstructT->mat = hybMat;
      A->transupdated = PETSC_TRUE;
      /* delete temporaries */
      if (tempT) {
        if (tempT->values) delete (THRUSTARRAY*) tempT->values;
        if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices;
        if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets;
        delete (CsrMatrix*) tempT;
      }
      if (temp) {
        if (temp->values) delete (THRUSTARRAY*) temp->values;
        if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices;
        if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets;
        delete (CsrMatrix*) temp;
      }
#endif
    }
  }
  if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */
    CsrMatrix *matrix  = (CsrMatrix*)matstruct->mat;
    CsrMatrix *matrixT = (CsrMatrix*)matstructT->mat;
    if (!matrix) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix");
    if (!matrix->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix rows");
    if (!matrix->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix cols");
    if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix values");
    if (!matrixT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT");
    if (!matrixT->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT rows");
    if (!matrixT->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT cols");
    if (!matrixT->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT values");
    if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */
      cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
      cusparsestruct->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
      ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
    }
    if (!cusparsestruct->csr2csc_i) {
      /* Build csr2csc_i, the CSR->CSC value permutation, once: run csr2csc on the
         value sequence 0,1,2,... so that matrixT->values temporarily holds each
         entry's source position, then cast those scalars to integers. */
      THRUSTARRAY csr2csc_a(matrix->num_entries);
      PetscStackCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0));
      indexBase = cusparseGetMatIndexBase(matstruct->descr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      void   *csr2cscBuffer;
      size_t csr2cscBufferSize;
      stat = cusparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n,
                                           A->cmap->n, matrix->num_entries,
                                           matrix->values->data().get(),
                                           cusparsestruct->rowoffsets_gpu->data().get(),
                                           matrix->column_indices->data().get(),
                                           matrixT->values->data().get(),
                                           matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
                                           CUSPARSE_ACTION_NUMERIC,indexBase,
                                           cusparsestruct->csr2cscAlg, &csr2cscBufferSize);CHKERRCUSPARSE(stat);
      err = cudaMalloc(&csr2cscBuffer,csr2cscBufferSize);CHKERRCUDA(err);
#endif
      if (matrix->num_entries) {
        /* When there are no nonzeros, this routine mistakenly returns CUSPARSE_STATUS_INVALID_VALUE in
           mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK.
           I checked every parameters and they were just fine. I have no clue why cusparse complains.

           Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[]
           should be filled with indexBase. So I just take a shortcut here.
        */
        stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n,
                                A->cmap->n,matrix->num_entries,
                                csr2csc_a.data().get(),
                                cusparsestruct->rowoffsets_gpu->data().get(),
                                matrix->column_indices->data().get(),
                                matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
                                matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
                                CUSPARSE_ACTION_NUMERIC,indexBase,
                                cusparsestruct->csr2cscAlg, csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
                                matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(),
                                CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
      } else {
        /* nnz == 0 shortcut: just fill row offsets with the index base (see note above) */
        matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase);
      }

      cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries);
      PetscStackCallThrust(thrust::transform(thrust::device,matrixT->values->begin(),matrixT->values->end(),cusparsestruct->csr2csc_i->begin(),PetscScalarToPetscInt()));
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      err = cudaFree(csr2cscBuffer);CHKERRCUDA(err);
#endif
    }
    /* refresh transpose values by gathering A's values through the cached permutation */
    PetscStackCallThrust(thrust::copy(thrust::device,thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()),
                                      thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()),
                                      matrixT->values->begin()));
  }
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
  /* the compressed row indices is not used for matTranspose */
  matstructT->cprowIndices = NULL;
  /* assign the pointer */
  ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT;
  A->transupdated = PETSC_TRUE;
  PetscFunctionReturn(0);
}
/* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = CUSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */
/* Solve A^T x = b with a permuted LU factorization: since A = P^T L U Q (with
   row/col permutations rperm/cperm), A^T x = b is solved as U^T then L^T
   triangular solves on the explicitly transposed factors, with the row
   permutation applied to b up front and the column permutation applied to x
   at the end. */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
  PetscInt                              n = xx->map->n;
  const PetscScalar                     *barray;
  PetscScalar                           *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  cusparseStatus_t                      stat;
  Mat_SeqAIJCUSPARSETriFactors          *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct     *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct     *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  THRUSTARRAY                           *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                        ierr;

  PetscFunctionBegin;
  /* Analyze the matrix and create the transpose ... on the fly */
  if (!loTriFactorT && !upTriFactorT) {
    ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
    loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
    upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  }

  /* Get the GPU pointers */
  ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  /* First, reorder with the row permutation: x <- b[rperm] */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
               thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()),
               xGPU);

  /* Next, solve U (i.e. U^T of the original factorization): x -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
                        upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        upTriFactorT->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, upTriFactorT->descr,
                        upTriFactorT->csrMat->values->data().get(),
                        upTriFactorT->csrMat->row_offsets->data().get(),
                        upTriFactorT->csrMat->column_indices->data().get(),
                        upTriFactorT->solveInfo,
                        xarray,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        tempGPU->data().get(),
                        upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
                        tempGPU->data().get());CHKERRCUSPARSE(stat);
#endif

  /* Then, solve L: tempGPU -> x */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
                        loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        loTriFactorT->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, loTriFactorT->descr,
                        loTriFactorT->csrMat->values->data().get(),
                        loTriFactorT->csrMat->row_offsets->data().get(),
                        loTriFactorT->csrMat->column_indices->data().get(),
                        loTriFactorT->solveInfo,
                        tempGPU->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        xarray,
                        loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
                        xarray);CHKERRCUSPARSE(stat);
#endif

  /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()),
               thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()),
               tempGPU->begin());

  /* Copy the temporary to the full solution. */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),tempGPU->begin(), tempGPU->end(), xGPU);

  /* restore */
  ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
  ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  /* two triangular solves: ~2*nnz flops minus one per unit diagonal entry */
  ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Solve A^T x = b when the factorization was done in natural ordering: same as
   MatSolveTranspose_SeqAIJCUSPARSE but without the row/column permutation
   passes -- solve U^T then L^T directly from b into x. */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
  const PetscScalar                 *barray;
  PetscScalar                       *xarray;
  cusparseStatus_t                  stat;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  THRUSTARRAY                       *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  /* Analyze the matrix and create the transpose ... on the fly */
  if (!loTriFactorT && !upTriFactorT) {
    ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
    loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
    upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  }

  /* Get the GPU pointers */
  ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  /* First, solve U: b -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
                        upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        upTriFactorT->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, upTriFactorT->descr,
                        upTriFactorT->csrMat->values->data().get(),
                        upTriFactorT->csrMat->row_offsets->data().get(),
                        upTriFactorT->csrMat->column_indices->data().get(),
                        upTriFactorT->solveInfo,
                        barray,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        tempGPU->data().get(),
                        upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
                        tempGPU->data().get());CHKERRCUSPARSE(stat);
#endif

  /* Then, solve L: tempGPU -> x */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
                        loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        loTriFactorT->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, loTriFactorT->descr,
                        loTriFactorT->csrMat->values->data().get(),
                        loTriFactorT->csrMat->row_offsets->data().get(),
                        loTriFactorT->csrMat->column_indices->data().get(),
                        loTriFactorT->solveInfo,
                        tempGPU->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        xarray,
                        loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
                        xarray);CHKERRCUSPARSE(stat);
#endif

  /* restore */
  ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
  ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  /* two triangular solves: ~2*nnz flops minus one per unit diagonal entry */
  ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Solve A x = b with the permuted LU factorization stored in A->spptr:
   gather b through the row permutation, forward-solve L, back-solve U, then
   scatter through the column permutation into x. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
  const PetscScalar                     *barray;
  PetscScalar                           *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  cusparseStatus_t                      stat;
  Mat_SeqAIJCUSPARSETriFactors          *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct     *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct     *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  THRUSTARRAY                           *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                        ierr;

  PetscFunctionBegin;
  /* Get the GPU pointers */
  ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  /* First, reorder with the row permutation: tempGPU <- b[rperm].
     The element base bGPU is fixed; the permutation iterator advances only the
     index iterator (rpermIndices), so both bounds use the same base. */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
               thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()),
               tempGPU->begin());

  /* Next, solve L: tempGPU -> x */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
                        loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        loTriFactor->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, loTriFactor->descr,
                        loTriFactor->csrMat->values->data().get(),
                        loTriFactor->csrMat->row_offsets->data().get(),
                        loTriFactor->csrMat->column_indices->data().get(),
                        loTriFactor->solveInfo,
                        tempGPU->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        xarray,
                        loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                        xarray);CHKERRCUSPARSE(stat);
#endif

  /* Then, solve U: x -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
                        upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        upTriFactor->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, upTriFactor->descr,
                        upTriFactor->csrMat->values->data().get(),
                        upTriFactor->csrMat->row_offsets->data().get(),
                        upTriFactor->csrMat->column_indices->data().get(),
                        upTriFactor->solveInfo,xarray,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        tempGPU->data().get(),
                        upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                        tempGPU->data().get());CHKERRCUSPARSE(stat);
#endif

  /* Last, reorder with the column permutation: x <- tempGPU[cperm] */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
               thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()),
               xGPU);

  ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
  ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  /* two triangular solves: ~2*nnz flops minus one per unit diagonal entry */
  ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Solve A x = b for a factorization done in natural ordering: no permutation
   passes are needed, so solve L then U directly from b into x. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
  const PetscScalar                 *barray;
  PetscScalar                       *xarray;
  cusparseStatus_t                  stat;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  THRUSTARRAY                       *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  /* Get the GPU pointers */
  ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  /* First, solve L: b -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
                        loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        loTriFactor->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, loTriFactor->descr,
                        loTriFactor->csrMat->values->data().get(),
                        loTriFactor->csrMat->row_offsets->data().get(),
                        loTriFactor->csrMat->column_indices->data().get(),
                        loTriFactor->solveInfo,
                        barray,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        tempGPU->data().get(),
                        loTriFactor->solvePolicy,loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                        tempGPU->data().get());CHKERRCUSPARSE(stat);
#endif

  /* Next, solve U: tempGPU -> x */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
                        upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        upTriFactor->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, upTriFactor->descr,
                        upTriFactor->csrMat->values->data().get(),
                        upTriFactor->csrMat->row_offsets->data().get(),
                        upTriFactor->csrMat->column_indices->data().get(),
                        upTriFactor->solveInfo,
                        tempGPU->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        xarray,
                        upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
                        xarray);CHKERRCUSPARSE(stat);
#endif

  ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
  ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  /* two triangular solves: ~2*nnz flops minus one per unit diagonal entry */
  ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Copy the matrix values (not the sparsity pattern) from the device CSR back
   to the host array a->a when the up-to-date copy lives only on the GPU.
   Afterwards both copies are current (PETSC_OFFLOAD_BOTH).  Assumes the device
   matrix is stored in CSR format (cusp->mat->mat is a CsrMatrix). */
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  cudaError_t        cerr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  if (A->offloadmask == PETSC_OFFLOAD_GPU) {
    CsrMatrix *matrix = (CsrMatrix*)cusp->mat->mat;

    ierr = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
    cerr = cudaMemcpy(a->a, matrix->values->data().get(), a->nz*sizeof(PetscScalar), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    cerr = WaitForCUDA();CHKERRCUDA(cerr);
    ierr = PetscLogGpuToCpu(a->nz*sizeof(PetscScalar));CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);

    A->offloadmask = PETSC_OFFLOAD_BOTH;
  }
  PetscFunctionReturn(0);
}
/* MatSeqAIJGetArray() implementation: return the host value array a->a after
   syncing it from the GPU.  The offload mask is downgraded to CPU because the
   caller receives a writable pointer and may modify the host values, which
   would make the device copy stale. */
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  *array = a->a;
  A->offloadmask = PETSC_OFFLOAD_CPU;
  PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt m = A->rmap->n,*ii,*ridx,tmp;
PetscErrorCode ierr;
cusparseStatus_t stat;
PetscBool both = PETSC_TRUE;
cudaError_t err;
PetscFunctionBegin;
if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Cannot copy to GPU");
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */
CsrMatrix *matrix;
matrix = (CsrMatrix*)cusparsestruct->mat->mat;
if (a->nz && !a->a) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR values");
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
matrix->values->assign(a->a, a->a+a->nz);
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
} else {
PetscInt nnz;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
delete cusparsestruct->workVector;
delete cusparsestruct->rowoffsets_gpu;
cusparsestruct->workVector = NULL;
cusparsestruct->rowoffsets_gpu = NULL;
try {
if (a->compressedrow.use) {
m = a->compressedrow.nrows;
ii = a->compressedrow.i;
ridx = a->compressedrow.rindex;
} else {
m = A->rmap->n;
ii = a->i;
ridx = NULL;
}
if (!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR row data");
if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR column data");
if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; }
else nnz = a->nz;
/* create cusparse matrix */
cusparsestruct->nrows = m;
matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
err = cudaMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* Build a hybrid/ellpack matrix if this option is chosen for the storage */
if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
/* set the matrix */
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
/* assign the pointer */
matstruct->mat = mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mat->num_rows) { /* cusparse errors on empty matrices! */
stat = cusparseCreateCsr(&matstruct->matDescr,
mat->num_rows, mat->num_cols, mat->num_entries,
mat->row_offsets->data().get(), mat->column_indices->data().get(),
mat->values->data().get(),
CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
#endif
} else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols,
matstruct->descr, mat->values->data().get(),
mat->row_offsets->data().get(),
mat->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstruct->mat = hybMat;
if (mat) {
if (mat->values) delete (THRUSTARRAY*)mat->values;
if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices;
if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets;
delete (CsrMatrix*)mat;
}
#endif
}
/* assign the compressed row indices */
if (a->compressedrow.use) {
cusparsestruct->workVector = new THRUSTARRAY(m);
matstruct->cprowIndices = new THRUSTINTARRAY(m);
matstruct->cprowIndices->assign(ridx,ridx+m);
tmp = m;
} else {
cusparsestruct->workVector = NULL;
matstruct->cprowIndices = NULL;
tmp = 0;
}
ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr);
/* assign the pointer */
cusparsestruct->mat = matstruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
cusparsestruct->nonzerostate = A->nonzerostate;
}
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
/* Thrust functor for zip-iterator traversals: accumulates the first tuple
   element into the second, i.e. dst += src. */
struct VecCUDAPlusEquals
{
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    thrust::get<1>(t) += thrust::get<0>(t);
  }
};
/* Thrust functor for zip-iterator traversals: copies the first tuple
   element into the second, i.e. dst = src. */
struct VecCUDAEquals
{
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    using thrust::get;
    get<1>(t) = get<0>(t);
  }
};
/* Thrust functor for zip-iterator traversals: copies the second tuple
   element into the first (opposite direction of the plain copy functor). */
struct VecCUDAEqualsReverse
{
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    using thrust::get;
    get<0>(t) = get<1>(t);
  }
};
/* Per-product scratch data stashed in C->product->data for cuSPARSE-based
   matrix-matrix products; released by MatDestroy_MatMatCusparse() */
struct MatMatCusparse {
PetscBool cisdense; /* user's C was MATSEQDENSE (host); result is converted back after computing on the GPU */
PetscScalar *Bt; /* device buffer holding B^T, needed on CUDA < 11 where csrmm cannot transpose B (ABt/RARt) */
Mat X; /* intermediate dense matrix holding the sparse-dense stage of PtAP/RARt */
PetscBool reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
PetscLogDouble flops; /* flop count precomputed at symbolic time, logged at each numeric call */
CsrMatrix *Bcsr; /* CSR view of B with full (uncompressed) row offsets when B uses compressed row storage */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseSpMatDescr_t matSpBDescr; /* generic-API sparse descriptor for Bcsr above */
PetscBool initialized; /* C = alpha op(A) op(B) + beta C */
cusparseDnMatDescr_t matBDescr; /* dense descriptor for B */
cusparseDnMatDescr_t matCDescr; /* dense descriptor for C (or for X in PtAP/RARt) */
PetscInt Blda,Clda; /* Record leading dimensions of B and C here to detect changes*/
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
void *dBuffer4; /* kept alive: needed again by cusparseSpGEMMreuse_compute */
void *dBuffer5; /* kept alive: needed again by cusparseSpGEMMreuse_compute */
#endif
size_t mmBufferSize; /* size of mmBuffer below */
void *mmBuffer; /* SpMM / SpGEMM compute work buffer */
void *mmBuffer2; /* SpGEMM WorkEstimation buffer */
cusparseSpGEMMDescr_t spgemmDesc;
#endif
};
/*
   MatDestroy_MatMatCusparse - destroy callback for the MatMatCusparse product
   data attached to C->product->data.

   Releases, in order: the B^T work buffer, the uncompressed-B CSR wrapper, the
   cuSPARSE descriptors and SpGEMM work buffers (CUDA >= 11 only), the
   intermediate dense matrix X, and finally the struct itself.
*/
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
  MatMatCusparse   *mmdata = (MatMatCusparse *)data;
  PetscErrorCode   ierr;
  cudaError_t      cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseStatus_t stat;
#endif

  PetscFunctionBegin;
  cerr = cudaFree(mmdata->Bt);CHKERRCUDA(cerr); /* cudaFree(NULL) is a no-op, so no guard needed */
  delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  /* descriptors are only destroyed if they were actually created */
  if (mmdata->matSpBDescr) {
    stat = cusparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat);
  }
  if (mmdata->matBDescr) {
    stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat);
  }
  if (mmdata->matCDescr) {
    stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat);
  }
  if (mmdata->spgemmDesc) {
    stat = cusparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
  }
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  if (mmdata->dBuffer4) {
    cerr = cudaFree(mmdata->dBuffer4);CHKERRCUDA(cerr);
  }
  if (mmdata->dBuffer5) {
    cerr = cudaFree(mmdata->dBuffer5);CHKERRCUDA(cerr);
  }
#endif
  if (mmdata->mmBuffer) {
    cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
  }
  if (mmdata->mmBuffer2) {
    cerr = cudaFree(mmdata->mmBuffer2);CHKERRCUDA(cerr);
  }
#endif
  ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr); /* MatDestroy() tolerates a NULL Mat */
  ierr = PetscFree(data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);
/*
   MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA - numeric phase for products of a
   SEQAIJCUSPARSE matrix A with a dense matrix B, computed with cusparseSpMM
   (CUDA >= 11) or cusparse csrmm (older CUDA).

   Supports MATPRODUCT_AB, AtB, ABt, PtAP and RARt. For PtAP/RARt the sparse-dense
   stage writes into the intermediate dense matrix mmdata->X, which is then combined
   with B (dense-dense) by MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private().
   If B is a host dense matrix it is converted in place to MATSEQDENSECUDA and
   converted back before returning; C is converted back if the caller's C was
   MATSEQDENSE (mmdata->cisdense, recorded in the symbolic phase).
*/
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n,blda,clda;
PetscBool flg,biscuda;
Mat_SeqAIJCUSPARSE *cusp;
cusparseStatus_t stat;
cusparseOperation_t opA;
const PetscScalar *barray;
PetscScalar *carray;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSEMultStruct *mat;
CsrMatrix *csrmat;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data empty");
mmdata = (MatMatCusparse*)product->data;
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
/* currently CopyToGpu does not copy if the matrix is bound to CPU
Instead of silently accepting the wrong answer, I prefer to raise the error */
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
/* select op(A), the mult struct to use (possibly the stored explicit transpose) and the result sizes */
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_PtAP:
mat = cusp->mat;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
if (!A->form_explicit_transpose) {
mat = cusp->mat;
opA = CUSPARSE_OPERATION_TRANSPOSE;
} else {
/* use the explicitly formed transpose with a non-transpose op instead of the transpose SpMM path */
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
mat = cusp->matTranspose;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
}
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
case MATPRODUCT_RARt:
mat = cusp->mat;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
}
if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csrmat = (CsrMatrix*)mat->mat;
/* if the user passed a CPU matrix, copy the data to the GPU */
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
/* PtAP/RARt write the sparse-dense stage into the intermediate X; other types write C directly */
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
/* (re)allocate mmBuffer if not initialized or LDAs are different */
if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
size_t mmBufferSize;
if (mmdata->initialized && mmdata->Blda != blda) {stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
if (!mmdata->matBDescr) {
stat = cusparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Blda = blda;
}
if (mmdata->initialized && mmdata->Clda != clda) {stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
stat = cusparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Clda = clda;
}
if (!mat->matDescr) {
stat = cusparseCreateCsr(&mat->matDescr,
csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(),
csrmat->values->data().get(),
CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
/* grow the work buffer only when the required size exceeds the cached one */
stat = cusparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
cudaError_t cerr;
cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
cerr = cudaMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
mmdata->mmBufferSize = mmBufferSize;
}
mmdata->initialized = PETSC_TRUE;
} else {
/* to be safe, always update pointers of the mats */
stat = cusparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
stat = cusparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
stat = cusparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
}
/* do cusparseSpMM, which supports transpose on B */
stat = cusparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
PetscInt k;
/* cusparseXcsrmm does not support transpose on B */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
cublasHandle_t cublasv2handle;
cublasStatus_t cerr;
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
/* explicitly transpose B into the preallocated buffer mmdata->Bt with cublasXgeam */
cerr = cublasXgeam(cublasv2handle,CUBLAS_OP_T,CUBLAS_OP_T,
B->cmap->n,B->rmap->n,
&PETSC_CUSPARSE_ONE ,barray,blda,
&PETSC_CUSPARSE_ZERO,barray,blda,
mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
blda = B->cmap->n;
k = B->cmap->n;
} else {
k = B->rmap->n;
}
/* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
csrmat->num_entries,mat->alpha_one,mat->descr,
csrmat->values->data().get(),
csrmat->row_offsets->data().get(),
csrmat->column_indices->data().get(),
mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
carray,clda);CHKERRCUSPARSE(stat);
#endif
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
/* second stage for RARt/PtAP: combine X with B (or B^T) via the dense-dense helper */
if (product->type == MATPRODUCT_RARt) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
} else if (product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
}
/* undo the in-place conversions done above, so the caller sees the original types */
if (mmdata->cisdense) {
ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
}
if (!biscuda) {
ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*
   MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA - symbolic phase for products of a
   SEQAIJCUSPARSE matrix with a dense matrix.

   Sets the sizes and type of C, allocates the MatMatCusparse product data (plus the
   B^T work buffer on CUDA < 11 and the intermediate dense matrix X for PtAP/RARt),
   and installs the numeric callback and the destroy routine for the product data.
*/
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n;
PetscBool cisdense,flg;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSE *cusp;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
/* result sizes for each supported product type */
switch (product->type) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
break;
case MATPRODUCT_PtAP:
m = B->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_RARt:
m = B->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
}
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
/* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr);
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
/* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
cudaError_t cerr = cudaMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
#endif
/* for these products we need intermediate storage */
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr);
ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr);
} else {
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr);
}
}
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
PetscFunctionReturn(0);
}
/*
   MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE - numeric phase for sparse-sparse
   products C = op(A)*op(B) with both operands SEQAIJCUSPARSE, using
   cusparseSpGEMMreuse_compute (CUDA >= 11.4), cusparseSpGEMM_compute/copy
   (CUDA 11.0-11.3), or the legacy csrgemm wrapper (older CUDA).

   The sparsity pattern and all descriptors/work buffers were set up in the symbolic
   phase; this routine only recomputes the numerical values. Transposes are realized
   through the explicitly stored transpose structs (cuSPARSE spgemm itself does not
   support transposed operands). When reusesym is set, the values were already
   computed during the symbolic phase and only the bookkeeping in 'finalize' runs.
*/
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *c = (Mat_SeqAIJ*)C->data;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscBool flg;
PetscErrorCode ierr;
cusparseStatus_t stat;
cudaError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseSpMatDescr_t BmatSpDescr;
#endif
cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE,opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data empty");
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for C of type %s",((PetscObject)C)->type_name);
mmdata = (MatMatCusparse*)C->product->data;
A = product->A;
B = product->B;
if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */
mmdata->reusesym = PETSC_FALSE;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
Cmat = Ccusp->mat;
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]);
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C CSR struct");
goto finalize;
}
if (!c->nz) goto finalize; /* empty result: nothing to compute */
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for B of type %s",((PetscObject)B)->type_name);
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
if (B->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
/* for symmetric operands a transposed product reduces to a plain AB product */
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
/* transposes are realized via the explicitly stored transpose structs formed at symbolic time */
switch (ptype) {
case MATPRODUCT_AB:
Amat = Acusp->mat;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_AtB:
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_ABt:
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
}
Cmat = Ccusp->mat;
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B CSR struct");
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C CSR struct");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
/* recompute only the values; structure and buffers (dBuffer4/5) were set up at symbolic time */
stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#endif
#else
stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr); /* flop count was precomputed in the symbolic phase */
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
/* shorter version of MatAssemblyEnd_SeqAIJ */
ierr = PetscInfo3(C,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
c->reallocs = 0;
C->info.mallocs += 0;
C->info.nz_unneeded = 0;
C->assembled = C->was_assembled = PETSC_TRUE;
C->num_ass++;
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *a,*b,*c;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt i,j,m,n,k;
PetscBool flg;
PetscErrorCode ierr;
cusparseStatus_t stat;
cudaError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
PetscLogDouble flops;
PetscBool biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
int64_t C_num_rows1, C_num_cols1, C_nnz1;
cusparseSpMatDescr_t BmatSpDescr;
#else
int cnz;
#endif
cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE,opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for B of type %s",((PetscObject)B)->type_name);
a = (Mat_SeqAIJ*)A->data;
b = (Mat_SeqAIJ*)B->data;
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
biscompressed = PETSC_FALSE;
ciscompressed = PETSC_FALSE;
switch (ptype) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
k = A->cmap->n;
Amat = Acusp->mat;
Bmat = Bcusp->mat;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
k = A->rmap->n;
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
k = A->cmap->n;
ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
}
/* create cusparse matrix */
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)C->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
c->compressedrow.use = ciscompressed;
if (c->compressedrow.use) { /* if a is in compressed row, than c will be in compressed row format */
c->compressedrow.nrows = a->compressedrow.nrows;
ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr);
ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr);
Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows);
} else {
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Cmat->cprowIndices = NULL;
}
Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = Ccusp->nrows;
Ccsr->num_cols = n;
Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1);
stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! */
thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0);
c->nz = 0;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
goto finalizesym;
}
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
if (!biscompressed) {
Bcsr = (CsrMatrix*)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = Bmat->matDescr;
#endif
} else { /* we need to use row offsets for the full matrix */
CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat;
Bcsr = new CsrMatrix;
Bcsr->num_rows = B->rmap->n;
Bcsr->num_cols = cBcsr->num_cols;
Bcsr->num_entries = cBcsr->num_entries;
Bcsr->column_indices = cBcsr->column_indices;
Bcsr->values = cBcsr->values;
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
mmdata->Bcsr = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (Bcsr->num_rows && Bcsr->num_cols) {
stat = cusparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries,
Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Bcsr->values->data().get(),
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
}
BmatSpDescr = mmdata->matSpBDescr;
#endif
}
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B CSR struct");
/* precompute flops count */
if (ptype == MATPRODUCT_AB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt st = a->i[i];
const PetscInt en = a->i[i+1];
for (j=st; j<en; j++) {
const PetscInt brow = a->j[j];
flops += 2.*(b->i[brow+1] - b->i[brow]);
}
}
} else if (ptype == MATPRODUCT_AtB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt anzi = a->i[i+1] - a->i[i];
const PetscInt bnzi = b->i[i+1] - b->i[i];
flops += (2.*anzi)*bnzi;
}
} else { /* TODO */
flops = 0.;
}
mmdata->flops = flops;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0,
NULL, NULL, NULL,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = cusparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
{
/* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it.
We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse
*/
void* dBuffer1 = NULL;
void* dBuffer2 = NULL;
void* dBuffer3 = NULL;
/* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */
size_t bufferSize1 = 0;
size_t bufferSize2 = 0;
size_t bufferSize3 = 0;
size_t bufferSize4 = 0;
size_t bufferSize5 = 0;
/*----------------------------------------------------------------------*/
/* ask bufferSize1 bytes for external memory */
stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize1, NULL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void**) &dBuffer1, bufferSize1);CHKERRCUDA(cerr);
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize1, dBuffer1);CHKERRCUSPARSE(stat);
/*----------------------------------------------------------------------*/
stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void**) &dBuffer2, bufferSize2);CHKERRCUDA(cerr);
cerr = cudaMalloc((void**) &dBuffer3, bufferSize3);CHKERRCUDA(cerr);
cerr = cudaMalloc((void**) &mmdata->dBuffer4, bufferSize4);CHKERRCUDA(cerr);
stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4);CHKERRCUSPARSE(stat);
cerr = cudaFree(dBuffer1);CHKERRCUDA(cerr);
cerr = cudaFree(dBuffer2);CHKERRCUDA(cerr);
/*----------------------------------------------------------------------*/
/* get matrix C non-zero entries C_nnz1 */
stat = cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
c->nz = (PetscInt) C_nnz1;
/* allocate matrix C */
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
/* update matC with the new pointers */
stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
Ccsr->values->data().get());CHKERRCUSPARSE(stat);
/*----------------------------------------------------------------------*/
stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize5, NULL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void**) &mmdata->dBuffer5, bufferSize5);CHKERRCUDA(cerr);
stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr,
CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc,
&bufferSize5, mmdata->dBuffer5);CHKERRCUSPARSE(stat);
cerr = cudaFree(dBuffer3);CHKERRCUDA(cerr);
stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufferSize4/1024,bufferSize5/1024);CHKERRQ(ierr);
}
#else // ~PETSC_PKG_CUDA_VERSION_GE(11,4,0)
size_t bufSize2;
/* ask bufferSize bytes for external memory */
stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr);
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat);
/* ask bufferSize again bytes for external memory */
stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat);
/* The CUSPARSE documentation is not clear, nor the API
We need both buffers to perform the operations properly!
mmdata->mmBuffer2 does not appear anywhere in the compute/copy API
it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address
is stored in the descriptor! What a messy API... */
cerr = cudaMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr);
/* compute the intermediate product of A * B */
stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
/* get matrix C non-zero entries C_nnz1 */
stat = cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
c->nz = (PetscInt) C_nnz1;
ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
Ccsr->values->data().get());CHKERRCUSPARSE(stat);
stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#endif
#else
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
stat = cusparseXcsrgemmNnz(Ccusp->handle, opA, opB,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat);
c->nz = cnz;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when
D is NULL, despite the fact that CUSPARSE documentation claims it is supported! */
stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalizesym:
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
PetscInt *d_i = c->i;
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = cudaMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
PetscInt *d_i = c->i;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = cudaMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (ciscompressed) { /* need to expand host row offsets */
PetscInt r = 0;
c->i[0] = 0;
for (k = 0; k < c->compressedrow.nrows; k++) {
const PetscInt next = c->compressedrow.rindex[k];
const PetscInt old = c->compressedrow.i[k];
for (; r < next; r++) c->i[r+1] = old;
}
for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows];
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (k = 0; k < m; k++) {
const PetscInt nn = c->i[k+1] - c->i[k];
c->ilen[k] = c->imax[k] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
Ccsr->num_entries = c->nz;
C->nonzerostate++;
ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = C->nonzerostate;
C->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
C->preallocated = PETSC_TRUE;
C->assembled = PETSC_FALSE;
C->was_assembled = PETSC_FALSE;
if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
mmdata->reusesym = PETSC_TRUE;
C->offloadmask = PETSC_OFFLOAD_GPU;
}
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);
/* handles sparse or dense B */
/* Choose the symbolic-product implementation for mat = product(A,B[,C]).
   The decision depends on: whether B is dense, whether B (and C for ABC) are
   SEQAIJCUSPARSE matrices not bound to the CPU, and per-product-type
   "-*_backend_cpu" options that let the user force the CPU code path.
   Falls back to the plain SeqAIJ implementation when the GPU path does not apply. */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
Mat_Product *product = mat->product;
PetscErrorCode ierr;
PetscBool isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;
PetscFunctionBegin;
MatCheckProduct(mat,1);
/* base-type compare so MATSEQDENSECUDA (derived from MATSEQDENSE) also matches */
ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
/* only consider the GPU sparse path when neither operand is pinned to the CPU */
if (!product->A->boundtocpu && !product->B->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
}
if (product->type == MATPRODUCT_ABC) {
Ciscusp = PETSC_FALSE;
if (!product->C->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
}
}
if (Biscusp && Ciscusp) { /* we can always select the CPU backend */
PetscBool usecpu = PETSC_FALSE;
/* each product type exposes a different option name; api_user distinguishes the
   MatMatMult()-style API from the MatProduct API so the option prefix matches the call used */
switch (product->type) {
case MATPRODUCT_AB:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMult","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matmatmult_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AB","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_ab_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
case MATPRODUCT_AtB:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatTransposeMatMult","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-mattransposematmult_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AtB","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_atb_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
case MATPRODUCT_PtAP:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_PtAP","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_ptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
case MATPRODUCT_RARt:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatRARt","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matrart_backend_cpu","Use CPU code","MatRARt",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_RARt","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_rart_backend_cpu","Use CPU code","MatRARt",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
case MATPRODUCT_ABC:
if (product->api_user) {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMatMult","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matmatmatmult_backend_cpu","Use CPU code","MatMatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
} else {
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_ABC","Mat");CHKERRQ(ierr);
ierr = PetscOptionsBool("-matproduct_abc_backend_cpu","Use CPU code","MatMatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
break;
default:
break;
}
/* user forced the CPU backend: pretend the operands are not cusparse */
if (usecpu) Biscusp = Ciscusp = PETSC_FALSE;
}
/* dispatch */
if (isdense) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
if (product->A->boundtocpu) {
ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
} else {
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
}
break;
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else if (Biscusp && Ciscusp) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
break;
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
case MATPRODUCT_ABC:
/* composite products go through the generic basic implementation */
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else { /* fallback for AIJ */
ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/* yy = A*xx; delegates to the shared multiply/add kernel with no add-vector,
   no transpose, no Hermitian conjugation */
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* zz = A*xx + yy; delegates to the shared multiply/add kernel (no transpose,
   no Hermitian conjugation) */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* yy = A^H * xx (conjugate transpose); delegates to the shared kernel with
   trans=PETSC_TRUE, herm=PETSC_TRUE */
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* zz = A^H * xx + yy (conjugate transpose); delegates to the shared kernel with
   trans=PETSC_TRUE, herm=PETSC_TRUE */
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* yy = A^T * xx (plain transpose, no conjugation); delegates to the shared
   kernel with trans=PETSC_TRUE, herm=PETSC_FALSE */
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* y[idx[i]] += x[i] for i in [0,n): scatter-add a compressed work vector into the
   full-length result vector (used after SpMV on a row-compressed matrix).
   Launch: 1-D grid covering n entries, one thread per entry, with a bounds check
   for the partial last block. No atomics are used, which assumes the idx entries
   are pairwise distinct (they are the indices of the nonzero rows) -- if that ever
   changes, atomicAdd would be required.
   idx and x are read-only, so mark them const __restrict__ to document intent and
   allow the compiler to use the read-only data cache. The index is computed in
   PetscInt to avoid int overflow when n exceeds 2^31 with 64-bit indices. */
__global__ static void ScatterAdd(PetscInt n, const PetscInt *__restrict__ idx, const PetscScalar *__restrict__ x, PetscScalar *__restrict__ y)
{
  PetscInt i = (PetscInt)blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) y[idx[i]] += x[i];
}
/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
/* Shared implementation for all MatMult* variants on SEQAIJCUSPARSE:
     zz = op(A)*xx [+ yy]
   where op is identity (!trans), transpose (trans && !herm) or conjugate
   transpose (trans && herm). yy may be NULL (plain multiply) or equal to zz
   (in-place add). Handles compressed-row storage (zero rows dropped) by routing
   the short result/input through cusparsestruct->workVector and scattering or
   gathering against matstruct->cprowIndices. */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct;
PetscScalar *xarray,*zarray,*dptr,*beta,*xptr;
PetscErrorCode ierr;
cusparseStatus_t stat;
cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
PetscBool compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
PetscInt nx,ny;
#endif
PetscFunctionBegin;
if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"Hermitian and not transpose not supported");
/* all-zero matrix: result is just 0 or a copy of yy */
if (!a->nonzerorowcnt) {
if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);}
else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);}
PetscFunctionReturn(0);
}
/* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!trans) {
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
} else {
/* transpose: either let cuSPARSE apply op(A) on the original storage, or use an
   explicitly formed (cached) transpose with a non-transpose operation */
if (herm || !A->form_explicit_transpose) {
opA = herm ? CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
} else {
if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);}
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
}
}
/* Does the matrix use compressed rows (i.e., drop zero rows)? */
compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;
try {
ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so need to get uptodate zarray on GPU */
else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
/* z = A x + beta y.
If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
*/
xptr = xarray;
dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
allocated to accommodate different uses. So we get the length info directly from mat.
*/
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_cols;
ny = mat->num_rows;
}
#endif
} else {
/* z = A^T x + beta y
If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
Note A^Tx is of full length, so we set beta to 1.0 if y exists.
*/
xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
dptr = zarray;
beta = yy ? matstruct->beta_one : matstruct->beta_zero;
if (compressed) { /* Scatter x to work vector */
thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
thrust::for_each(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAEqualsReverse());
}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* for op(A)=A^T the roles of the dimensions are swapped */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_rows;
ny = mat->num_cols;
}
#endif
}
/* csr_spmv does y = alpha op(A) x + beta y */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on cusparseOperation_t has changed and PETSc has not been updated accordingly");
/* one cached SpMV descriptor/buffer triple per operation (N/T/H) */
if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
cudaError_t cerr;
stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = cusparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one,
matstruct->matDescr,
matstruct->cuSpMV[opA].vecXDescr, beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
&matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
} else {
/* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
stat = cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
stat = cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
}
stat = cusparseSpMV(cusparsestruct->handle, opA,
matstruct->alpha_one,
matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTransposeForMult() */
matstruct->cuSpMV[opA].vecXDescr,
beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
/* pre-CUDA-11 legacy csrmv API */
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
stat = cusparse_csr_spmv(cusparsestruct->handle, opA,
mat->num_rows, mat->num_cols,
mat->num_entries, matstruct->alpha_one, matstruct->descr,
mat->values->data().get(), mat->row_offsets->data().get(),
mat->column_indices->data().get(), xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
} else {
if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
stat = cusparse_hyb_spmv(cusparsestruct->handle, opA,
matstruct->alpha_one, matstruct->descr, hybMat,
xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
}
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
if (yy) { /* MatMultAdd: zz = A*xx + yy */
if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
} else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
} else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
}
/* ScatterAdd the result from work vector into the full vector when A is compressed */
if (compressed) {
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registerred)
and in the destructor of the scope, it will call cudaStreamSynchronize() on this stream. One has to store all events to
prevent that. So I just add a ScatterAdd kernel.
*/
#if 0
thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
thrust::async::for_each(thrust::cuda::par.on(cusparsestruct->stream),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAPlusEquals());
#else
PetscInt n = matstruct->cprowIndices->size();
ScatterAdd<<<(n+255)/256,256,0,PetscDefaultCudaStream>>>(n,matstruct->cprowIndices->data().get(),cusparsestruct->workVector->data().get(),zarray);
#endif
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
} else {
/* transpose path: result is full length already; just add yy if needed
   (yy == zz was already folded in via beta = 1) */
if (yy && yy != zz) {
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
}
ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
/* flop accounting: 2 flops per stored nonzero, minus one per nonzero row when
   there is no vector to add */
if (yy) {
ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/* zz = A^T * xx + yy (plain transpose); delegates to the shared kernel with
   trans=PETSC_TRUE, herm=PETSC_FALSE */
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Finish assembly on the host side, then invalidate the cached device-side
   matrix object if the nonzero structure changed during this assembly. */
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
  PetscErrorCode     ierr;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  PetscObjectState   statebefore     = A->nonzerostate; /* snapshot before host assembly */

  PetscFunctionBegin;
  ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
  if (cusparsestruct->deviceMat && A->nonzerostate != statebefore) {
    cudaError_t cerr;

    ierr = PetscInfo(A,"Destroy device mat since nonzerostate changed\n");CHKERRQ(ierr);
    cerr = cudaFree(cusparsestruct->deviceMat);CHKERRCUDA(cerr);
    cusparsestruct->deviceMat = NULL;
  }
  PetscFunctionReturn(0);
}
/* --------------------------------------------------------------------------------*/
/*@
MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
(the default parallel PETSc format). This matrix will ultimately be pushed down
to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameter nz (or the array nnz). By setting these parameters accurately,
performance during matrix assembly can be increased by more than a factor of 50.
Collective
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows)
- nnz - array containing the number of nonzeros in the various rows
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
MatXXXXSetPreallocation() paradigm instead of this routine directly.
[MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
Notes:
If nnz is given then nz is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage), is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either nz or nnz (not both).
Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
/* sequential matrix: local sizes equal global sizes */
ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
/* preallocate directly via the SeqAIJ routine; nnz (when non-NULL) overrides nz */
ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Free the CUSPARSE-specific data hanging off A->spptr (the plain GPU struct for
   unfactored matrices, the triangular-factor struct otherwise), unregister the
   composed methods installed by this class, then run the base SeqAIJ destroy. */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
}
/* composing NULL removes each previously composed method */
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatConvert_seqaijcusparse_hypre_C",NULL);CHKERRQ(ierr);
ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool);
/* Duplicate by first making a host-side SeqAIJ copy, then converting that copy
   in place to SEQAIJCUSPARSE so it picks up the GPU function table and spptr. */
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* Y = Y + a*X on the GPU. Strategy depends on the nonzero-structure relation:
   - SAME_NONZERO_PATTERN: a single cuBLAS axpy on the value arrays;
   - SUBSET_NONZERO_PATTERN: cuSPARSE csrgeam (sparse matrix add) into Y's pattern;
   - otherwise: fall back to the CPU SeqAIJ implementation.
   If the patterns happen to be identical the routine detects this up front (via
   device-side comparisons of the index arrays) and upgrades str to
   SAME_NONZERO_PATTERN. */
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
PetscErrorCode ierr;
Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data;
Mat_SeqAIJCUSPARSE *cy;
Mat_SeqAIJCUSPARSE *cx;
PetscScalar *ay;
const PetscScalar *ax;
CsrMatrix *csry,*csrx;
PetscFunctionBegin;
cy = (Mat_SeqAIJCUSPARSE*)Y->spptr;
cx = (Mat_SeqAIJCUSPARSE*)X->spptr;
/* mismatched axpy implementations means X is not (or no longer) a GPU matrix: go through the CPU path */
if (X->ops->axpy != Y->ops->axpy) {
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* if we are here, it means both matrices are bound to GPU */
ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr);
if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_GPU,"only MAT_CUSPARSE_CSR supported");
if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_GPU,"only MAT_CUSPARSE_CSR supported");
csry = (CsrMatrix*)cy->mat->mat;
csrx = (CsrMatrix*)cx->mat->mat;
/* see if we can turn this into a cublas axpy */
if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin());
if (eq) {
eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin());
}
if (eq) str = SAME_NONZERO_PATTERN;
}
/* spgeam is buggy with one column */
if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;
if (str == SUBSET_NONZERO_PATTERN) {
cusparseStatus_t stat;
PetscScalar b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
size_t bufferSize;
void *buffer;
cudaError_t cerr;
#endif
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
/* a and b live on the host, so switch pointer mode around the csrgeam call */
stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* CUDA >= 11: csrgeam2 requires an explicit user-provided work buffer */
stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
cerr = cudaFree(buffer);CHKERRCUDA(cerr);
#else
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else if (str == SAME_NONZERO_PATTERN) {
/* identical patterns: the value arrays line up entry-for-entry, so a dense axpy suffices */
cublasHandle_t cublasv2handle;
cublasStatus_t berr;
PetscBLASInt one = 1, bnz = 1;
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr);
ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else {
/* patterns differ: CPU fallback (may change Y's structure, so drop the cached transpose) */
ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/* Scale every stored value of Y by the scalar a, entirely on the GPU
   (cuBLAS xSCAL over the CSR values array). The cached diagonal is
   invalidated since the values changed. */
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y,PetscScalar a)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)Y->data;
  PetscScalar    *varray;
  cublasHandle_t handle;
  cublasStatus_t cberr;
  PetscBLASInt   inc = 1, nz32 = 1;

  PetscFunctionBegin;
  ierr  = MatSeqAIJCUSPARSEGetArray(Y,&varray);CHKERRQ(ierr); /* device pointer; marks values GPU-resident */
  ierr  = PetscCUBLASGetHandle(&handle);CHKERRQ(ierr);
  ierr  = PetscBLASIntCast(aij->nz,&nz32);CHKERRQ(ierr);      /* guard against PetscInt -> BLAS int overflow */
  ierr  = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  cberr = cublasXscal(handle,nz32,&a,varray,inc);CHKERRCUBLAS(cberr);
  ierr  = PetscLogGpuFlops(nz32);CHKERRQ(ierr);
  ierr  = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr  = MatSeqAIJCUSPARSERestoreArray(Y,&varray);CHKERRQ(ierr);
  ierr  = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Zero all stored values of A on both host and device.

   If the device CSR copy (and the cached transpose) exist, their value
   arrays are zeroed in place with thrust::fill; the host array a->a is
   always zeroed. When a device copy was updated too the offload mask is
   set to PETSC_OFFLOAD_BOTH, otherwise to PETSC_OFFLOAD_CPU. */
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;
  PetscBool      both = PETSC_FALSE; /* set when the device values were zeroed as well */
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;

  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr;
    if (spptr->mat) {
      CsrMatrix* matrix = (CsrMatrix*)spptr->mat->mat;
      if (matrix->values) {
        both = PETSC_TRUE;
        thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
      }
    }
    if (spptr->matTranspose) { /* keep the cached transpose consistent with the zeroed matrix */
      CsrMatrix* matrix = (CsrMatrix*)spptr->matTranspose->mat;
      if (matrix->values) {
        thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
      }
    }
  }
  //ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr);
  ierr = PetscArrayzero(a->a,a->i[A->rmap->n]);CHKERRQ(ierr); /* zero all a->i[nrows] host values */
  ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr);
  if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
  else A->offloadmask = PETSC_OFFLOAD_CPU;
  PetscFunctionReturn(0);
}
/* Switch A's operation table between host (MatXXX_SeqAIJ) and device
   (MatXXX_SeqAIJCUSPARSE) implementations. When binding to the CPU
   (flg == PETSC_TRUE) the up-to-date values are first copied back from the
   GPU. The composed function slots (COO preallocation/values, subarray
   copy, product dispatch) are installed/removed in tandem with the ops
   table so MatProduct and COO assembly follow the same backend. */
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg)
{
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* factored matrices keep their factor-specific ops; nothing to rebind */
  if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0);
  if (flg) {
    ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); /* make host values current before using host kernels */

    A->ops->scale                     = MatScale_SeqAIJ;
    A->ops->axpy                      = MatAXPY_SeqAIJ;
    A->ops->zeroentries               = MatZeroEntries_SeqAIJ;
    A->ops->mult                      = MatMult_SeqAIJ;
    A->ops->multadd                   = MatMultAdd_SeqAIJ;
    A->ops->multtranspose             = MatMultTranspose_SeqAIJ;
    A->ops->multtransposeadd          = MatMultTransposeAdd_SeqAIJ;
    A->ops->multhermitiantranspose    = NULL;
    A->ops->multhermitiantransposeadd = NULL;
    A->ops->productsetfromoptions     = MatProductSetFromOptions_SeqAIJ;
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
  } else {
    A->ops->scale                     = MatScale_SeqAIJCUSPARSE;
    A->ops->axpy                      = MatAXPY_SeqAIJCUSPARSE;
    A->ops->zeroentries               = MatZeroEntries_SeqAIJCUSPARSE;
    A->ops->mult                      = MatMult_SeqAIJCUSPARSE;
    A->ops->multadd                   = MatMultAdd_SeqAIJCUSPARSE;
    A->ops->multtranspose             = MatMultTranspose_SeqAIJCUSPARSE;
    A->ops->multtransposeadd          = MatMultTransposeAdd_SeqAIJCUSPARSE;
    A->ops->multhermitiantranspose    = MatMultHermitianTranspose_SeqAIJCUSPARSE;
    A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
    A->ops->productsetfromoptions     = MatProductSetFromOptions_SeqAIJCUSPARSE;
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
  }
  A->boundtocpu = flg;
  a->inode.use  = flg; /* inode optimization only applies to the host kernels */
  PetscFunctionReturn(0);
}
/* Convert a MATSEQAIJ matrix to MATSEQAIJCUSPARSE.

   Duplicates or copies A into *newmat according to reuse (MAT_INPLACE_MATRIX
   arrives with *newmat == A), switches the default vector type to VECCUDA,
   allocates the GPU-side bookkeeping (Mat_SeqAIJCUSPARSE for regular
   matrices, Mat_SeqAIJCUSPARSETriFactors for factors) with its cusparse
   handle bound to PETSc's default CUDA stream, and installs the CUSPARSE
   ops table via MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE). */
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;
  Mat              B;

  PetscFunctionBegin;
  ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr);
  } else if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  B = *newmat;

  ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
  ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);

  if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
    if (B->factortype == MAT_FACTOR_NONE) {
      Mat_SeqAIJCUSPARSE *spptr;
      ierr = PetscNew(&spptr);CHKERRQ(ierr);
      stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
      stat = cusparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
      spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
      spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */
#else
      spptr->spmvAlg = CUSPARSE_CSRMV_ALG1; /* default, since we only support csr */
#endif
      spptr->spmmAlg    = CUSPARSE_SPMM_CSR_ALG1; /* default, only support column-major dense matrix B */
      spptr->csr2cscAlg = CUSPARSE_CSR2CSC_ALG1;
#endif
      B->spptr = spptr;
    } else {
      /* factored matrix: only the handle-carrying trifactor container is needed here */
      Mat_SeqAIJCUSPARSETriFactors *spptr;
      ierr = PetscNew(&spptr);CHKERRQ(ierr);
      stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
      stat = cusparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
      B->spptr = spptr;
    }
    B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
  }
  B->ops->assemblyend    = MatAssemblyEnd_SeqAIJCUSPARSE;
  B->ops->destroy        = MatDestroy_SeqAIJCUSPARSE;
  B->ops->setoption      = MatSetOption_SeqAIJCUSPARSE;
  B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
  B->ops->bindtocpu      = MatBindToCPU_SeqAIJCUSPARSE;
  B->ops->duplicate      = MatDuplicate_SeqAIJCUSPARSE;

  ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr); /* install the GPU ops */
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
#if defined(PETSC_HAVE_HYPRE)
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqaijcusparse_hypre_C",MatConvert_AIJ_HYPRE);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
/* MATSEQAIJCUSPARSE constructor: build a plain SeqAIJ matrix, then convert
   it in place to the CUSPARSE type. */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*MC
MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.
A matrix type whose data resides on NVIDIA GPUs. These matrices can be in either
CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.
Options Database Keys:
+ -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
. -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
Level: beginner
.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat,MatFactorType,Mat*);
/* Register the CUSPARSE solver packages with PETSc's MatGetFactor()
   dispatch: a banded LU for plain MATSEQAIJ, plus LU/Cholesky/ILU/ICC
   factorizations for MATSEQAIJCUSPARSE. */
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse_band);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Free all data held by a Mat_SeqAIJCUSPARSE struct: the device matrix and
   its cached transpose, the SpMV work vector, the full row-offset cache,
   the COO permutation arrays, the csr2csc index map, and the cusparse
   handle. The struct itself is freed last. Safe on NULL input. */
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;

  PetscFunctionBegin;
  if (*cusparsestruct) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
    delete (*cusparsestruct)->workVector;
    delete (*cusparsestruct)->rowoffsets_gpu;
    delete (*cusparsestruct)->cooPerm;
    delete (*cusparsestruct)->cooPerm_a;
    delete (*cusparsestruct)->csr2csc_i;
    if ((*cusparsestruct)->handle) {stat = cusparseDestroy((*cusparsestruct)->handle);CHKERRCUSPARSE(stat);}
    ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Free a CsrMatrix: its three device arrays (values, column indices, row
   offsets) and the struct itself, then reset the caller's pointer.
   A NULL *mat is a no-op. */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  CsrMatrix *m = *mat;

  PetscFunctionBegin;
  if (m) {
    delete m->values;
    delete m->column_indices;
    delete m->row_offsets;
    delete m;
    *mat = 0;
  }
  PetscFunctionReturn(0);
}
/* Destroy one triangular-factor structure: its cusparse matrix descriptor,
   the triangular-solve analysis info, the CSR storage, and any solve /
   pinned-host / csr2csc scratch buffers. Safe on NULL input. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  cusparseStatus_t stat;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*trifactor) {
    if ((*trifactor)->descr)     { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSPARSE(stat); }
    if ((*trifactor)->solveInfo) { stat = cusparse_destroy_analysis_info((*trifactor)->solveInfo);CHKERRCUSPARSE(stat); }
    ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
    if ((*trifactor)->solveBuffer)   {cudaError_t cerr = cudaFree((*trifactor)->solveBuffer);CHKERRCUDA(cerr);}
    if ((*trifactor)->AA_h)          {cudaError_t cerr = cudaFreeHost((*trifactor)->AA_h);CHKERRCUDA(cerr);} /* pinned host buffer */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    if ((*trifactor)->csr2cscBuffer) {cudaError_t cerr = cudaFree((*trifactor)->csr2cscBuffer);CHKERRCUDA(cerr);}
#endif
    ierr = PetscFree(*trifactor);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Destroy a Mat_SeqAIJCUSPARSEMultStruct: the device matrix (CSR, or
   HYB/ELL on pre-11 CUDA), its cusparse descriptors, the compressed-row
   index array, the device-resident scalar constants, and (CUDA >= 11) the
   generic SpMat descriptor plus per-operation SpMV buffers.

   Fix: the CsrMatrix_Destroy() return code was previously discarded; it is
   now checked with CHKERRQ like every other call site of that helper. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
  CsrMatrix        *mat;
  cusparseStatus_t stat;
  cudaError_t      err;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat);
#endif
      } else {
        mat  = (CsrMatrix*)(*matstruct)->mat;
        ierr = CsrMatrix_Destroy(&mat);CHKERRQ(ierr); /* was unchecked: do not silently drop destroy errors */
      }
    }
    if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); }
    delete (*matstruct)->cprowIndices;
    /* device-resident 1.0/0.0/1.0 constants used with CUSPARSE_POINTER_MODE_DEVICE */
    if ((*matstruct)->alpha_one) { err=cudaFree((*matstruct)->alpha_one);CHKERRCUDA(err); }
    if ((*matstruct)->beta_zero) { err=cudaFree((*matstruct)->beta_zero);CHKERRCUDA(err); }
    if ((*matstruct)->beta_one)  { err=cudaFree((*matstruct)->beta_one);CHKERRCUDA(err); }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
    if (mdata->matDescr) {stat = cusparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);}
    /* release the per-operation SpMV buffers and dense-vector descriptors */
    for (int i=0; i<3; i++) {
      if (mdata->cuSpMV[i].initialized) {
        err  = cudaFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err);
        stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat);
        stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat);
      }
    }
#endif
    delete *matstruct;
    *matstruct = NULL;
  }
  PetscFunctionReturn(0);
}
/* Release all data held by the triangular-factor container (factors,
   transposed factors, permutation indices, work vector, band-factor device
   arrays) without destroying the container or its cusparse handle, so it
   can be refilled by a subsequent factorization.

   Fix: a_band_d/i_band_d are now reset to NULL after cudaFree, matching the
   treatment of rpermIndices/cpermIndices/workVector; previously a second
   Reset (e.g. an explicit Reset followed by Destroy, which calls Reset
   again) would cudaFree stale pointers. */
PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p* trifactors)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
    delete (*trifactors)->rpermIndices;
    delete (*trifactors)->cpermIndices;
    delete (*trifactors)->workVector;
    (*trifactors)->rpermIndices = NULL;
    (*trifactors)->cpermIndices = NULL;
    (*trifactors)->workVector   = NULL;
    if ((*trifactors)->a_band_d) {
      cudaError_t cerr = cudaFree((*trifactors)->a_band_d);CHKERRCUDA(cerr);
      (*trifactors)->a_band_d = NULL; /* avoid double free on repeated Reset */
    }
    if ((*trifactors)->i_band_d) {
      cudaError_t cerr = cudaFree((*trifactors)->i_band_d);CHKERRCUDA(cerr);
      (*trifactors)->i_band_d = NULL; /* avoid double free on repeated Reset */
    }
    (*trifactors)->init_dev_prop = PETSC_FALSE;
  }
  PetscFunctionReturn(0);
}
/* Destroy the triangular-factor container: reset all its contents, destroy
   the private cusparse handle (if any), and free the container itself.

   Fix: the assignment formerly embedded in the if-condition
   (`if (handle = ...)`) is hoisted out — same behavior, but no
   assignment-vs-comparison ambiguity (and no -Wparentheses warning). */
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  PetscErrorCode   ierr;
  cusparseHandle_t handle;
  cusparseStatus_t stat;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr   = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr);
    handle = (*trifactors)->handle;
    if (handle) {
      stat = cusparseDestroy(handle);CHKERRCUSPARSE(stat);
    }
    ierr = PetscFree(*trifactors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Strict weak lexicographic "<" on (row,col) index pairs: order by row,
   breaking ties by column. Used to sort COO entries into CSR order. */
struct IJCompare
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    return t1.get<0>() < t2.get<0>() || (t1.get<0>() == t2.get<0>() && t1.get<1>() < t2.get<1>());
  }
};
/* Equality of (row,col) index pairs; used to collapse duplicate COO entries. */
struct IJEqual
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    return t1.get<0>() == t2.get<0>() && t1.get<1>() == t2.get<1>();
  }
};
/* Returns 1 when the two values differ and 0 when they are equal; used with
   adjacent_difference to flag positions where a new (i,j) entry begins. */
struct IJDiff
{
  __host__ __device__
  inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
  {
    return (t1 != t2) ? 1 : 0;
  }
};
/* Logical OR of two 0/1 flags; combines the row-change and column-change
   markers produced by IJDiff. */
struct IJSum
{
  __host__ __device__
  inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
  {
    return (t1 != 0) || (t2 != 0);
  }
};
#include <thrust/iterator/discard_iterator.h>
/* Insert/add the values v[] (given in the caller's original COO ordering)
   into the device CSR values array, using the permutation (cooPerm) and the
   optional duplicate-compression map (cooPerm_a) built by
   MatSetPreallocationCOO_SeqAIJCUSPARSE(). v may be a host or a device
   pointer; a NULL v with INSERT_VALUES zeroes the matrix values. */
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
  Mat_SeqAIJCUSPARSE                    *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJ                            *a = (Mat_SeqAIJ*)A->data;
  THRUSTARRAY                           *cooPerm_v = NULL; /* device staging buffer when v is a host pointer */
  thrust::device_ptr<const PetscScalar> d_v;
  CsrMatrix                             *matrix;
  PetscErrorCode                        ierr;
  PetscInt                              n;

  PetscFunctionBegin;
  if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct");
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix");
  if (!cusp->cooPerm) {
    /* no COO preallocation information: fall back to a plain assembly */
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  matrix = (CsrMatrix*)cusp->mat->mat;
  if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  if (!v) {
    if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
    goto finalize;
  }
  n = cusp->cooPerm->size();
  if (isCudaMem(v)) {
    d_v = thrust::device_pointer_cast(v);
  } else {
    cooPerm_v = new THRUSTARRAY(n);
    cooPerm_v->assign(v,v+n);
    d_v = cooPerm_v->data();
    ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr);
  }
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
    if (cusp->cooPerm_a) { /* there are repeated entries in d_v[], and we need to sum them first */
      THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
      auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
      /* thrust::reduce_by_key(keys_first,keys_last,values_first,keys_output,values_output)
        cooPerm_a = [0,0,1,2,3,4]. The length is n, number of nonzeros in d_v[].
        cooPerm_a is ordered. d_v[i] is the cooPerm_a[i]-th unique nonzero.
      */
      thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
      thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>());
      delete cooPerm_w;
    } else {
      /* all nonzeros in d_v[] are unique entries */
      auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
                                                                matrix->values->begin()));
      auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
                                                                matrix->values->end()));
      thrust::for_each(zibit,zieit,VecCUDAPlusEquals()); /* values[i] += d_v[cooPerm[i]] */
    }
  } else {
    if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
      auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
      thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
    } else {
      auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
                                                                matrix->values->begin()));
      auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
                                                                matrix->values->end()));
      thrust::for_each(zibit,zieit,VecCUDAEquals()); /* values[i] = d_v[cooPerm[i]] */
    }
  }
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalize:
  delete cooPerm_v;
  A->offloadmask = PETSC_OFFLOAD_GPU; /* only the device copy is current now */
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr);
  ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
  ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr);
  a->reallocs         = 0;
  A->info.mallocs    += 0;
  A->info.nz_unneeded = 0;
  A->assembled = A->was_assembled = PETSC_TRUE;
  A->num_ass++;
  PetscFunctionReturn(0);
}
/* Mark the cached transpose (matTranspose) of A as out of date; when
   destroy is PETSC_TRUE the cached transpose (and the csr2csc index map
   used to rebuild it) is freed outright instead of merely flagged for
   regeneration. No-op if the CUSPARSE struct does not exist yet. */
PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (!cusp) PetscFunctionReturn(0);
  if (destroy) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
    delete cusp->csr2csc_i;
    cusp->csr2csc_i = NULL;
  }
  A->transupdated = PETSC_FALSE;
  PetscFunctionReturn(0);
}
#include <thrust/binary_search.h>
/* Build the CSR structure of A from n COO entries (coo_i[],coo_j[]).

   The (i,j) pairs are sorted on the device; duplicates are detected, and a
   compression map cooPerm_a is kept (only when duplicates exist) so that
   MatSetValuesCOO() can later sum repeated entries. cooPerm records the
   permutation from the user's COO ordering to the sorted CSR ordering.
   Host-side a->i/a->j are also rebuilt so the matrix behaves as a normal
   SeqAIJ on the CPU, and the device CSR is allocated (with zero values). */
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[])
{
  PetscErrorCode     ierr;
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data;
  PetscInt           cooPerm_n, nzr = 0; /* nzr counts nonzero rows */
  cudaError_t        cerr;

  PetscFunctionBegin;
  ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
  cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0;
  if (n != cooPerm_n) {
    /* entry count changed: discard previously built permutation data */
    delete cusp->cooPerm;
    delete cusp->cooPerm_a;
    cusp->cooPerm = NULL;
    cusp->cooPerm_a = NULL;
  }
  if (n) {
    THRUSTINTARRAY d_i(n);
    THRUSTINTARRAY d_j(n);
    THRUSTINTARRAY ii(A->rmap->n);

    if (!cusp->cooPerm)   { cusp->cooPerm   = new THRUSTINTARRAY(n); }
    if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); }

    ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
    d_i.assign(coo_i,coo_i+n);
    d_j.assign(coo_j,coo_j+n);

    /* Ex.
      n = 6
      coo_i = [3,3,1,4,1,4]
      coo_j = [3,2,2,5,2,6]
    */
    auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin()));
    auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end()));

    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
    thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare()); /* sort by row, then by col */
    *cusp->cooPerm_a = d_i; /* copy the sorted array */
    THRUSTINTARRAY w = d_j;

    /*
      d_i   = [1,1,3,3,4,4]
      d_j   = [2,2,2,3,5,6]
      cooPerm = [2,4,1,0,3,5]
    */
    auto nekey = thrust::unique(fkey, ekey, IJEqual()); /* unique (d_i, d_j) */

    /*
      d_i   = [1,3,3,4,4,x]
                          ^ekey
      d_j   = [2,2,3,5,6,x]
                         ^nekey
    */
    if (nekey == ekey) { /* all entries are unique */
      delete cusp->cooPerm_a;
      cusp->cooPerm_a = NULL;
    } else { /* Stefano: I couldn't come up with a more elegant algorithm */
      /* idea: any change in i or j in the (i,j) sequence implies a new nonzero */
      adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff()); /* cooPerm_a: [1,1,3,3,4,4] => [1,0,1,0,1,0]*/
      adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff());                                             /* w:         [2,2,2,3,5,6] => [2,0,0,1,1,1]*/
      (*cusp->cooPerm_a)[0] = 0; /* clear the first entry, though accessing an entry on device implies a cudaMemcpy */
      w[0] = 0;
      thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum());                  /* cooPerm_a = [0,0,1,1,1,1]*/
      thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>());      /* cooPerm_a = [0,0,1,2,3,4]*/
    }
    thrust::counting_iterator<PetscInt> search_begin(0);
    thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(), /* binary search entries of [0,1,2,3,4,5,6) in ordered array d_i = [1,3,3,4,4], supposing A->rmap->n = 6. */
                        search_begin, search_begin + A->rmap->n,          /* return in ii[] the index of last position in d_i[] where value could be inserted without violating the ordering */
                        ii.begin());                                      /* ii = [0,1,1,3,5,5]. A leading 0 will be added later */
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);

    /* rebuild the host-side CSR indices from the device results */
    ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr);
    a->singlemalloc = PETSC_FALSE;
    a->free_a       = PETSC_TRUE;
    a->free_ij      = PETSC_TRUE;
    ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr);
    a->i[0] = 0; /* a->i = [0,0,1,1,3,5,5] */
    cerr = cudaMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    a->nz = a->maxnz = a->i[A->rmap->n];
    a->rmax = 0;
    ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr);
    ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr);
    cerr = cudaMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); }
    if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); }
    for (PetscInt i = 0; i < A->rmap->n; i++) {
      const PetscInt nnzr = a->i[i+1] - a->i[i];
      nzr += (PetscInt)!!(nnzr);
      a->ilen[i] = a->imax[i] = nnzr;
      a->rmax = PetscMax(a->rmax,nnzr);
    }
    a->nonzerorowcnt = nzr;
    A->preallocated = PETSC_TRUE;
    ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr);
  }
  ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);

  /* We want to allocate the CUSPARSE struct for matvec now.
     The code is so convoluted now that I prefer to copy zeros */
  ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr);
  ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr);
  A->offloadmask = PETSC_OFFLOAD_CPU;
  A->nonzerostate++;
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);

  A->assembled = PETSC_FALSE;
  A->was_assembled = PETSC_FALSE;
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSEGetIJ - returns the device row storage i and j indices for MATSEQAIJCUSPARSE matrices.
Not collective
Input Parameters:
+ A - the matrix
- compressed - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be always returned in compressed form
Output Parameters:
+ ia - the CSR row pointers
- ja - the CSR column indices
Level: developer
Notes:
When compressed is true, the CSR structure does not contain empty rows
.seealso: MatSeqAIJCUSPARSERestoreIJ(), MatSeqAIJCUSPARSEGetArrayRead()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int** i, const int **j)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  if (!i || !j) PetscFunctionReturn(0); /* both pointers must be requested; asking for only one is a no-op */
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); /* make sure the device CSR is current */
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (i) {
    if (!compressed && a->compressedrow.use) { /* need full row offset */
      if (!cusp->rowoffsets_gpu) {
        /* build (and cache) the full row offsets on the device from the host a->i */
        cusp->rowoffsets_gpu  = new THRUSTINTARRAY32(A->rmap->n + 1);
        cusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
        ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
      }
      *i = cusp->rowoffsets_gpu->data().get();
    } else *i = csr->row_offsets->data().get();
  }
  if (j) *j = csr->column_indices->data().get();
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSERestoreIJ - restore the device row storage i and j indices obtained with MatSeqAIJCUSPARSEGetIJ()
Not collective
Input Parameters:
+ A - the matrix
- compressed - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be always returned in compressed form
Output Parameters:
+ ia - the CSR row pointers
- ja - the CSR column indices
Level: developer
.seealso: MatSeqAIJCUSPARSEGetIJ()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool compressed, const int** i, const int **j)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  /* nothing to release on the device; just invalidate the caller's pointers */
  if (i) *i = NULL;
  if (j) *j = NULL;
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Notes: may trigger host-device copies if up-to-date matrix data is on host
.seealso: MatSeqAIJCUSPARSEGetArray(), MatSeqAIJCUSPARSEGetArrayWrite(), MatSeqAIJCUSPARSERestoreArrayRead()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); /* may trigger a host-to-device copy */
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get(); /* raw device pointer to the CSR values; read-only, no state change */
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from MatSeqAIJCUSPARSEGetArrayRead()
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
.seealso: MatSeqAIJCUSPARSEGetArrayRead()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  *a = NULL; /* read-only access: no object state increase needed */
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Notes: may trigger host-device copies if up-to-date matrix data is on host
.seealso: MatSeqAIJCUSPARSEGetArrayRead(), MatSeqAIJCUSPARSEGetArrayWrite(), MatSeqAIJCUSPARSERestoreArray()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); /* may trigger a host-to-device copy */
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get(); /* raw device pointer to the CSR values */
  A->offloadmask = PETSC_OFFLOAD_GPU; /* caller may write: GPU copy becomes the only valid one */
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from MatSeqAIJCUSPARSEGetArray()
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
.seealso: MatSeqAIJCUSPARSEGetArray()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar** a)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr); /* values may have been modified */
  *a = NULL;
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Notes: does not trigger host-device copies and flags data validity on the GPU
.seealso: MatSeqAIJCUSPARSEGetArray(), MatSeqAIJCUSPARSEGetArrayRead(), MatSeqAIJCUSPARSERestoreArrayWrite()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  /* write-only: no MatSeqAIJCUSPARSECopyToGPU() here, existing values are not needed */
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get(); /* raw device pointer to the CSR values */
  A->offloadmask = PETSC_OFFLOAD_GPU; /* GPU copy becomes the only valid one */
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from MatSeqAIJCUSPARSEGetArrayWrite()
Not Collective
Input Parameter:
. A - a MATSEQAIJCUSPARSE matrix
Output Parameter:
. a - pointer to the device data
Level: developer
.seealso: MatSeqAIJCUSPARSEGetArrayWrite()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar** a)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  /* values were (re)written on the device; invalidate state-dependent caches */
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  /* zero the caller's pointer so it cannot be used after restore */
  *a = NULL;
  PetscFunctionReturn(0);
}
struct IJCompare4
{
  /* Lexicographic "less-than" on (row, col) for COO tuples of the form
     (row, col, value, perm); the value and perm slots are ignored. */
  __host__ __device__
  inline bool operator() (const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
  {
    const int r1 = t1.get<0>();
    const int r2 = t2.get<0>();
    if (r1 != r2) return r1 < r2;
    return t1.get<1>() < t2.get<1>();
  }
};
struct Shift
{
  /* Unary functor adding a fixed offset to an integer index; used to
     shift the column indices of B when concatenating [A, B]. */
  int _offset;
  Shift(int shift) : _offset(shift) {}
  __host__ __device__
  inline int operator() (const int &c)
  {
    return _offset + c;
  }
};
/* merges two SeqAIJCUSPARSE matrices A, B by concatenating their rows. [A';B']' operation in matlab notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C)
{
  /* Build C = [A, B] (row-wise concatenation of A' and B', i.e. [A';B']'
     in MATLAB notation) for two SeqAIJCUSPARSE matrices with the same
     number of rows.  MAT_INITIAL_MATRIX builds structure and values;
     MAT_REUSE_MATRIX only refreshes the values through the permutation
     saved in cooPerm.  MAT_INPLACE_MATRIX and ELL/HYB formats are not
     supported. */
  PetscErrorCode ierr;
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c;
  Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp;
  Mat_SeqAIJCUSPARSEMultStruct *Cmat;
  CsrMatrix *Acsr,*Bcsr,*Ccsr;
  PetscInt Annz,Bnnz;
  cusparseStatus_t stat;
  PetscInt i,m,n,zero = 0;
  cudaError_t cerr;
  PetscFunctionBegin;
  /* argument checking: both inputs must be CSR SeqAIJCUSPARSE with the
     same row count */
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidHeaderSpecific(B,MAT_CLASSID,2);
  PetscValidPointer(C,4);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  PetscCheckTypeName(B,MATSEQAIJCUSPARSE);
  if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",A->rmap->n,B->rmap->n);
  if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported");
  if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if (Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if (reuse == MAT_INITIAL_MATRIX) {
    /* first assembly: create C and its cuSPARSE CSR container */
    m = A->rmap->n;
    n = A->cmap->n + B->cmap->n;
    ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr);
    ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr);
    ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
    c = (Mat_SeqAIJ*)(*C)->data;
    Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
    Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
    Ccsr = new CsrMatrix;
    Cmat->cprowIndices = NULL;
    c->compressedrow.use = PETSC_FALSE;
    c->compressedrow.nrows = 0;
    c->compressedrow.i = NULL;
    c->compressedrow.rindex = NULL;
    Ccusp->workVector = NULL;
    Ccusp->nrows = m;
    Ccusp->mat = Cmat;
    Ccusp->mat->mat = Ccsr;
    Ccsr->num_rows = m;
    Ccsr->num_cols = n;
    stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
    stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
    stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
    /* device-resident scalar constants used by the SpMV routines */
    cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
    cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
    cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
    cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
    cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
    cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
    ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
    if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
    if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
    Acsr = (CsrMatrix*)Acusp->mat->mat;
    Bcsr = (CsrMatrix*)Bcusp->mat->mat;
    Annz = (PetscInt)Acsr->column_indices->size();
    Bnnz = (PetscInt)Bcsr->column_indices->size();
    c->nz = Annz + Bnnz;
    Ccsr->row_offsets = new THRUSTINTARRAY32(m+1);
    Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
    Ccsr->values = new THRUSTARRAY(c->nz);
    Ccsr->num_entries = c->nz;
    Ccusp->cooPerm = new THRUSTINTARRAY(c->nz);
    if (c->nz) {
      /* convert both CSR structures to COO, shift B's columns by
         A->cmap->n, and merge the two (row, col, val, origin) streams in
         lexicographic order; the origin flag is later turned into the
         permutation cooPerm used by the MAT_REUSE_MATRIX path */
      auto Acoo = new THRUSTINTARRAY32(Annz);
      auto Bcoo = new THRUSTINTARRAY32(Bnnz);
      auto Ccoo = new THRUSTINTARRAY32(c->nz);
      THRUSTINTARRAY32 *Aroff,*Broff;
      if (a->compressedrow.use) { /* need full row offset */
        if (!Acusp->rowoffsets_gpu) {
          Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
          Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
          ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
        }
        Aroff = Acusp->rowoffsets_gpu;
      } else Aroff = Acsr->row_offsets;
      if (b->compressedrow.use) { /* need full row offset */
        if (!Bcusp->rowoffsets_gpu) {
          Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
          Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
          ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
        }
        Broff = Bcusp->rowoffsets_gpu;
      } else Broff = Bcsr->row_offsets;
      ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
      stat = cusparseXcsr2coo(Acusp->handle,
                              Aroff->data().get(),
                              Annz,
                              m,
                              Acoo->data().get(),
                              CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
      stat = cusparseXcsr2coo(Bcusp->handle,
                              Broff->data().get(),
                              Bnnz,
                              m,
                              Bcoo->data().get(),
                              CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
      /* Issues when using bool with large matrices on SUMMIT 10.2.89 */
      auto Aperm = thrust::make_constant_iterator(1);
      auto Bperm = thrust::make_constant_iterator(0);
#if PETSC_PKG_CUDA_VERSION_GE(10,0,0)
      auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n));
      auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n));
#else
      /* there are issues instantiating the merge operation using a transform iterator for the columns of B */
      auto Bcib = Bcsr->column_indices->begin();
      auto Bcie = Bcsr->column_indices->end();
      thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n));
#endif
      auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz);
      auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm));
      auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm));
      auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm));
      auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm));
      auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin()));
      auto p1 = Ccusp->cooPerm->begin();
      auto p2 = Ccusp->cooPerm->begin();
      thrust::advance(p2,Annz);
      PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4()));
#if PETSC_PKG_CUDA_VERSION_LT(10,0,0)
      /* undo the in-place column shift applied above for old CUDA */
      thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n));
#endif
      auto cci = thrust::make_counting_iterator(zero);
      auto cce = thrust::make_counting_iterator(c->nz);
#if 0 //Errors on SUMMIT cuda 11.1.0
      PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>()));
#else
      /* split positions by origin flag: first Annz entries of cooPerm map
         A's values into C, the remaining Bnnz map B's values */
      auto pred = thrust::identity<int>();
      PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred));
      PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred));
#endif
      /* convert the merged COO row indices back to CSR row offsets */
      stat = cusparseXcoo2csr(Ccusp->handle,
                              Ccoo->data().get(),
                              c->nz,
                              m,
                              Ccsr->row_offsets->data().get(),
                              CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
      ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
      delete wPerm;
      delete Acoo;
      delete Bcoo;
      delete Ccoo;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries,
                               Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(),
                               CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                               CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
      if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */
        /* C' = [A'; B'] is a simple vertical stack: copy A' then B'
           (with B's transposed row offsets shifted by a->nz) */
        PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
        Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct;
        CsrMatrix *CcsrT = new CsrMatrix;
        CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
        CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
        (*C)->form_explicit_transpose = PETSC_TRUE;
        (*C)->transupdated = PETSC_TRUE;
        Ccusp->rowoffsets_gpu = NULL;
        CmatT->cprowIndices = NULL;
        CmatT->mat = CcsrT;
        CcsrT->num_rows = n;
        CcsrT->num_cols = m;
        CcsrT->num_entries = c->nz;
        CcsrT->row_offsets = new THRUSTINTARRAY32(n+1);
        CcsrT->column_indices = new THRUSTINTARRAY32(c->nz);
        CcsrT->values = new THRUSTARRAY(c->nz);
        ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
        auto rT = CcsrT->row_offsets->begin();
        if (AT) {
          rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT);
          /* step back one so B's first offset overwrites A's trailing one */
          thrust::advance(rT,-1);
        }
        if (BT) {
          auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz));
          auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz));
          thrust::copy(titb,tite,rT);
        }
        auto cT = CcsrT->column_indices->begin();
        if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT);
        if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT);
        auto vT = CcsrT->values->begin();
        if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
        if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
        ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
        stat = cusparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatIndexBase(CmatT->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatType(CmatT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
        cerr = cudaMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
        cerr = cudaMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
        cerr = cudaMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
        cerr = cudaMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
        cerr = cudaMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
        cerr = cudaMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        stat = cusparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries,
                                 CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(),
                                 CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                                 CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
        Ccusp->matTranspose = CmatT;
      }
    }
    /* mirror the CSR structure on the host side of the SeqAIJ data */
    c->singlemalloc = PETSC_FALSE;
    c->free_a = PETSC_TRUE;
    c->free_ij = PETSC_TRUE;
    ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
    ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
    if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
      THRUSTINTARRAY ii(Ccsr->row_offsets->size());
      THRUSTINTARRAY jj(Ccsr->column_indices->size());
      ii = *Ccsr->row_offsets;
      jj = *Ccsr->column_indices;
      cerr = cudaMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
      cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    } else {
      cerr = cudaMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
      cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    }
    ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
    ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
    c->maxnz = c->nz;
    c->nonzerorowcnt = 0;
    c->rmax = 0;
    for (i = 0; i < m; i++) {
      const PetscInt nn = c->i[i+1] - c->i[i];
      c->ilen[i] = c->imax[i] = nn;
      c->nonzerorowcnt += (PetscInt)!!nn;
      c->rmax = PetscMax(c->rmax,nn);
    }
    ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr);
    ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
    (*C)->nonzerostate++;
    ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr);
    Ccusp->nonzerostate = (*C)->nonzerostate;
    (*C)->preallocated = PETSC_TRUE;
  } else {
    /* MAT_REUSE_MATRIX: refresh only the numerical values of C through
       the cooPerm permutation built during the initial assembly */
    if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",(*C)->rmap->n,B->rmap->n);
    c = (Mat_SeqAIJ*)(*C)->data;
    if (c->nz) {
      Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
      if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm");
      if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
      if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate");
      ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
      ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
      if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
      if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
      Acsr = (CsrMatrix*)Acusp->mat->mat;
      Bcsr = (CsrMatrix*)Bcusp->mat->mat;
      Ccsr = (CsrMatrix*)Ccusp->mat->mat;
      if (Acsr->num_entries != (PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size());
      if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size());
      if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size());
      if (Ccsr->num_entries != Acsr->num_entries + Bcsr->num_entries) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries);
      if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size());
      auto pmid = Ccusp->cooPerm->begin();
      thrust::advance(pmid,Acsr->num_entries);
      ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
      /* scatter A's values into C via cooPerm[0..Annz) ... */
      auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(),
                                                                 thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin())));
      auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(),
                                                                 thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
      thrust::for_each(zibait,zieait,VecCUDAEquals());
      /* ... and B's values via cooPerm[Annz..Annz+Bnnz) */
      auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(),
                                                                 thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
      auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(),
                                                                 thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end())));
      thrust::for_each(zibbit,ziebit,VecCUDAEquals());
      ierr = MatSeqAIJCUSPARSEInvalidateTranspose(*C,PETSC_FALSE);CHKERRQ(ierr);
      if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) {
        if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct");
        PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
        CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
        CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
        CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat;
        /* transpose values are stored A' first, then B'; refresh in place */
        auto vT = CcsrT->values->begin();
        if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
        if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
        (*C)->transupdated = PETSC_TRUE;
      }
      ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
    }
  }
  ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr);
  (*C)->assembled = PETSC_TRUE;
  (*C)->was_assembled = PETSC_FALSE;
  (*C)->offloadmask = PETSC_OFFLOAD_GPU;
  PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
{
  /* Gather selected entries of A's value array into v:
     v[k] = a[idx[k]] for k = 0..n-1, or the first n values when idx is
     NULL.  v may be a host or device pointer; the gather itself always
     runs on the GPU. */
  PetscErrorCode ierr;
  bool dmem;
  const PetscScalar *av;
  cudaError_t cerr;
  PetscFunctionBegin;
  dmem = isCudaMem(v);
  ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr);
  if (n && idx) {
    /* upload the index list and gather via a permutation iterator */
    THRUSTINTARRAY widx(n);
    widx.assign(idx,idx+n);
    ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
    THRUSTARRAY *w = NULL;
    thrust::device_ptr<PetscScalar> dv;
    if (dmem) {
      dv = thrust::device_pointer_cast(v);
    } else {
      /* host destination: gather into a device scratch buffer first */
      w = new THRUSTARRAY(n);
      dv = w->data();
    }
    thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av);
    auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv));
    auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n));
    thrust::for_each(zibit,zieit,VecCUDAEquals());
    if (w) {
      cerr = cudaMemcpy(v,w->data().get(),n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    }
    delete w;
  } else {
    /* contiguous copy of the first n values */
    cerr = cudaMemcpy(v,av,n*sizeof(PetscScalar),dmem ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
  }
  /* NOTE(review): this logs a CpuToGpu transfer although the copy above is
     device-to-host; presumably PetscLogGpuToCpu was intended -- confirm */
  if (!dmem) { ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); }
  ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
|
c26966a4ac257490b42fcae3ce341b4f3df9e049.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define N 512
// Element-wise vector add: c[i] = a[i] + b[i].
// Generalized to use the global thread index so multi-block launches work;
// with the original single-block launch (blockIdx.x == 0) this reduces to
// threadIdx.x, so existing callers are unaffected.  The caller must launch
// exactly one thread per element (no length parameter is available for a
// bounds check).
__global__ void add( int *a, int *b, int *c ) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	c[i] = a[i] + b[i];
}
/* Fill p[0..n-1] with pseudo-random values from rand().
   rand() is never seeded here, so the sequence is the C library's
   default (deterministic from run to run). */
void random_ints(int *p, int n) {
	for (int k = 0; k < n; k++) {
		p[k] = rand();
	}
}
// Vector addition demo: compute c = a + b on the GPU and verify the
// result against a CPU reference d.
int main( void ) {
	int *a, *b, *c, *d;         // host buffers: inputs a,b; GPU result c; CPU reference d
	int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
	int size = N * sizeof( int ); // we need space for N integers
	int i;
	// allocate device copies of a, b, c
	hipMalloc( (void**)&dev_a, size );
	hipMalloc( (void**)&dev_b, size );
	hipMalloc( (void**)&dev_c, size );
	a = (int*)malloc( size );
	b = (int*)malloc( size );
	c = (int*)malloc( size );
	d = (int*)malloc( size );
	random_ints( a, N );
	random_ints( b, N );
	// copy inputs to device
	hipMemcpy( dev_a, a, size, hipMemcpyHostToDevice );
	hipMemcpy( dev_b, b, size, hipMemcpyHostToDevice );
	// launch an add() kernel with N threads in a single block
	hipLaunchKernelGGL(( add), dim3(1), dim3(N) , 0, 0, dev_a, dev_b, dev_c );
	// copy device result back to host copy of c
	hipMemcpy( c, dev_c, size, hipMemcpyDeviceToHost );
	// verify: d is the expected (CPU) value, c the observed (GPU) value
	for(i=0; i<N; i++) {
		d[i]=a[i]+b[i];
		if(d[i]!=c[i]) {
			// bug fix: arguments were swapped (c printed as "expected")
			printf("error: expected %d, got %d!\n", d[i], c[i]);
			break;
		}
	}
	if(i==N) {
		printf("correct!\n");
	}
	free( a ); free( b ); free( c );
	free( d );  // bug fix: d was leaked
	hipFree( dev_a );
	hipFree( dev_b );
	hipFree( dev_c );
	return 0;
}
| c26966a4ac257490b42fcae3ce341b4f3df9e049.cu | #include <stdio.h>
#include <math.h>
#define N 512
// Element-wise vector add: c[i] = a[i] + b[i].
// Generalized to use the global thread index so multi-block launches work;
// with the original single-block launch (blockIdx.x == 0) this reduces to
// threadIdx.x, so existing callers are unaffected.  The caller must launch
// exactly one thread per element (no length parameter is available for a
// bounds check).
__global__ void add( int *a, int *b, int *c ) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	c[i] = a[i] + b[i];
}
/* Fill p[0..n-1] with pseudo-random values from rand().
   rand() is never seeded here, so the sequence is the C library's
   default (deterministic from run to run). */
void random_ints(int *p, int n) {
	for (int k = 0; k < n; k++) {
		p[k] = rand();
	}
}
// Vector addition demo: compute c = a + b on the GPU and verify the
// result against a CPU reference d.
int main( void ) {
	int *a, *b, *c, *d;         // host buffers: inputs a,b; GPU result c; CPU reference d
	int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
	int size = N * sizeof( int ); // we need space for N integers
	int i;
	// allocate device copies of a, b, c
	cudaMalloc( (void**)&dev_a, size );
	cudaMalloc( (void**)&dev_b, size );
	cudaMalloc( (void**)&dev_c, size );
	a = (int*)malloc( size );
	b = (int*)malloc( size );
	c = (int*)malloc( size );
	d = (int*)malloc( size );
	random_ints( a, N );
	random_ints( b, N );
	// copy inputs to device
	cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
	cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
	// launch an add() kernel with N threads in a single block
	add<<< 1, N >>>( dev_a, dev_b, dev_c );
	// copy device result back to host copy of c
	cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );
	// verify: d is the expected (CPU) value, c the observed (GPU) value
	for(i=0; i<N; i++) {
		d[i]=a[i]+b[i];
		if(d[i]!=c[i]) {
			// bug fix: arguments were swapped (c printed as "expected")
			printf("error: expected %d, got %d!\n", d[i], c[i]);
			break;
		}
	}
	if(i==N) {
		printf("correct!\n");
	}
	free( a ); free( b ); free( c );
	free( d );  // bug fix: d was leaked
	cudaFree( dev_a );
	cudaFree( dev_b );
	cudaFree( dev_c );
	return 0;
}
|
c2a5c78a3bbd39718826b8dbc7044d1b43a154b1.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "string"
#include "fstream"
#include "cudaFEM_read.cuh"
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include <iostream>
#include "fstream"
#include <hip/hip_runtime.h>
#include <cusolverSp.h>
#include "device_launch_parameters.h"
#include <cusolverDn.h>
#include <hipsparse.h>
#include <vector>
#include <cassert>
#include "Utilities.cuh"
#include <ctime>
#include "cuda_functions.cuh"
#define max(a,b) ((a) > (b) ? (a) : (b))
#define IDX2C(i,j,ld) (((j)*(ld))+( i )) //first entry is columns and second entry is rows.
#define threeD21D(row_d,col_d,el_d,width_d,depth_d) (row_d+width_d*(col_d+depth_d*el_d))
#define nodesinelemX(node,el,nodesPerElem) (node + nodesPerElem*el)
#define nodesDisplacementX(dof,node,dimension) (dof + node*dimension)
Geometry::Geometry(){
	// Default to the CPU assembly path; all buffers are allocated later
	// by the read_* / initilizeMatrices routines.
	cuda_use = false;
	std::cout << "Geometry Object created" << std::endl;
}
Geometry::~Geometry(){
	// Release all host-side storage and device (GPU) buffers.
	std::cout << "Geometry Object deleted" << std::endl;
	// nodal coordinate arrays
	delete[] x;
	delete[] y;
	delete[] z;
	// per-element stiffness/mass matrices and connectivity.
	// bug fix: all of these were allocated with new[]; freeing a new[]
	// pointer with plain `delete` is undefined behavior, so delete[] is
	// used throughout.
	for (int e = 0; e < numE; e++){
		for (int i = 0; i < numNodesPerElem*dim; i++){
			delete[] E[e][i];
			delete[] M[e][i];
		}
		delete[] E[e];
		delete[] M[e];
		delete[] nodesInElem[e];
	}
	for (int i = 0; i < numNodes; i++){
		delete[] displaceInElem[i];
	}
	for (int i = 0; i < numNodes*dim; i++) {
		delete[] K[i];
	}
	delete[] K;
	delete[] u;
	delete[] f;
	delete[] displaceInElem;
	delete[] E;
	delete[] M;
	delete[] nodesInElem;
	delete[] E_vector_host;
	delete[] elemForce;
	delete[] forceVec_x;
	delete[] forceVec_y;
	delete[] K_vector_form;
	// device-side buffers
	hipFree(d_A_dense);
	hipFree(d_nnzPerVector);
	hipFree(d_A);
	hipFree(d_A_RowIndices);
	hipFree(d_A_ColIndices);
	hipFree(nodesInElem_device);
	hipFree(d_x);
	hipFree(d_y);
	hipFree(d_z);
	hipFree(E_vector_device);
	free(h_nnzPerVector);
	//free(h_A_dense);
}
void Geometry::read_nodes(){
	// Read the node count and nodal coordinates from "FEM_Nodes.txt".
	// 3D: x,y,z per node.  2D: x,y per node; z is zeroed and the initial
	// coordinates are cached in x_init/y_init.
	std::ifstream in_matrix("FEM_Nodes.txt");
	if (!in_matrix){
		std::cout << "cannot open Nodes \n";
	}
	in_matrix >> numNodes;
	x = new double[numNodes];
	y = new double[numNodes];
	z = new double[numNodes];
	x_init = new double[numNodes];
	y_init = new double[numNodes];
	if (dim == 3){
		for (int n = 0; n < numNodes; n++){
			in_matrix >> x[n] >> y[n] >> z[n];
		}
	}
	else if (dim == 2){
		for (int n = 0; n < numNodes; n++){
			in_matrix >> x[n] >> y[n];
			x_init[n] = x[n];
			y_init[n] = y[n];
			z[n] = 0;
		}
	}
	in_matrix.close();
	//u = new double[numNodes*dim];
	// right-hand-side vector of the dynamic solve (one entry per DOF)
	b_rhs = new float[numNodes*dim];
}
void Geometry::read_elem(){
	// Read mesh connectivity from "FEM_Elem.txt" (element count, nodes per
	// element, then the node list of every element), mirror it to the GPU,
	// and read per-node global DOF numbers from "FEM_displacement.txt".
	std::ifstream in_elem("FEM_Elem.txt");
	std::cout << "Reading in element files" << std::endl;
	if (!in_elem){
		std::cout << "cannot open Element file \n";
	}
	in_elem >> numE >> numNodesPerElem;
	// Allocating per-element stiffness (E) and mass (M) matrices
	E = new double**[numE];
	M = new double**[numE];
	nodesInElem = new int*[numE];
	nodesInElem_host = new int[numE*numNodesPerElem];
	// bug fix: nodesInElem_device is a device pointer that is allocated by
	// hipMalloc below; the previous host-side `new int[...]` assignment to
	// it only leaked memory and was removed.
	// Allocate a new vector for storing all of the stresses at an element
	global_stress_mises = new double[numE];
	hipMalloc((void**)&nodesInElem_device, numE*numNodesPerElem*sizeof(int));
	for (int e = 0; e < numE; e++){
		E[e] = new double*[numNodesPerElem*dim];
		M[e] = new double*[numNodesPerElem*dim];
		nodesInElem[e] = new int[numNodesPerElem];
		for (int i = 0; i < numNodesPerElem*dim; i++){
			E[e][i] = new double[numNodesPerElem*dim];
			M[e][i] = new double[numNodesPerElem*dim];
		}
	}
	E_vector_host = new double[numE*numNodesPerElem*dim*numNodesPerElem*dim];
	hipMalloc((void**)&E_vector_device, numE*numNodesPerElem*dim*numNodesPerElem*dim*sizeof(*E_vector_device));
	// Populating the nodesinelem matrix
	for (int e = 0; e < numE; e++) {
		for (int i = 0; i < numNodesPerElem; i++)
			in_elem >> nodesInElem[e][i];
	}
	in_elem.close();
	// Flatten the connectivity into a contiguous host array and upload it
	for (int e = 0; e < numE; e++) {
		for (int i = 0; i < numNodesPerElem; i++){
			nodesInElem_host[nodesinelemX(i, e, numNodesPerElem)] = nodesInElem[e][i];
		}
	}
	hipMemcpy(nodesInElem_device, nodesInElem_host, numE*numNodesPerElem*sizeof(int), hipMemcpyHostToDevice);
	std::ifstream in_disp("FEM_displacement.txt");
	if (!in_disp){
		std::cout << "cannot open displacement file \n";
	}
	displaceInElem = new int*[numNodes];
	displaceInElem_host = new int[numNodes*dim];
	// bug fix: displaceInElem_device likewise gets its storage from
	// hipMalloc; the host-side `new int[...]` assignment leaked and was
	// removed.
	for (int i = 0; i < numNodes; i++){
		displaceInElem[i] = new int[3];
	}
	hipMalloc((void**)&displaceInElem_device, numNodes*dim*sizeof(int));
	for (int i = 0; i < numNodes; i++){
		for (int j = 0; j < dim; j++){
			in_disp >> displaceInElem[i][j];
		}
	}
	// Flatten the DOF table and upload it as well
	for (int i = 0; i < numNodes; i++){
		for (int j = 0; j < dim; j++){
			displaceInElem_host[nodesDisplacementX(j, i, dim)] = displaceInElem[i][j];
		}
	}
	hipMemcpy(displaceInElem_device, displaceInElem_host, numNodes*dim*sizeof(int), hipMemcpyHostToDevice);
	in_disp.close();
}
void Geometry::read_force(){
	// Read point-force boundary conditions from "FEM_force.txt": for each
	// record, an element id, a local node coordinate, and the x/y force
	// components.
	std::ifstream in_matrix("FEM_force.txt");
	if (!in_matrix){
		std::cout << "cannot open force file \n";
	}
	else{
		in_matrix >> numForceBC;
		elemForce = new int[numForceBC];
		localcoordForce = new int[numForceBC];
		forceVec_x = new double[numForceBC];
		forceVec_y = new double[numForceBC];
		for (int k = 0; k < numForceBC; k++){
			in_matrix >> elemForce[k] >> localcoordForce[k] >> forceVec_x[k] >> forceVec_y[k];
		}
	}
	in_matrix.close();
}
void Geometry::initilizeMatrices(){
	// Allocate the global system storage: dense stiffness/mass matrices
	// (host and device) and the Newmark dynamic state vectors.
#if 0
	hipMalloc((void**)&d_x, numNodes*sizeof(double));
	hipMalloc((void**)&d_y, numNodes*sizeof(double));
	hipMalloc((void**)&d_z, numNodes*sizeof(double));
#endif // 0
	hipMalloc((void**)&d_x_dist, numNodes*sizeof(*d_x_dist));
	hipMalloc((void**)&d_y_dist, numNodes*sizeof(*d_x_dist));
	hipMalloc((void**)&d_z_dist, numNodes*sizeof(*d_x_dist));
	K = new double*[numNodes*dim];
	// bug fix: operator new[] takes an ELEMENT COUNT, not a byte count;
	// the previous `new T[count*sizeof(T)]` over-allocated every dense
	// matrix by a factor of sizeof(T).
	h_A_dense = new float[numNodes*dim*numNodes*dim];
	h_M_dense = new double[numNodes*dim*numNodes*dim];
	L = new float[numNodes*dim*numNodes*dim];
	h_A_dense_double = new double[numNodes*dim*numNodes*dim];
	// device allocations (hipMalloc correctly takes a byte count)
	gpuErrchk(hipMalloc((void**)&d_A_dense, numNodes*dim*numNodes*dim* sizeof(*d_A_dense)));
	gpuErrchk(hipMalloc((void**)&device_L, numNodes*dim*numNodes*dim* sizeof(*device_L)));
	gpuErrchk(hipMalloc((void**)&d_A_dense_double, numNodes*dim*numNodes*dim* sizeof(*d_A_dense_double)));
	for (int i = 0; i < numNodes*dim; i++) {
		K[i] = new double[numNodes*dim];
	}
	// dynamic state: displacement u, velocity u_dot, acceleration
	// u_doubledot (current and previous time step)
	u = new double[numNodes*dim];
	f = new double[numNodes*dim];
	u_dot = new double[numNodes*dim];
	u_doubledot = new double[numNodes*dim];
	u_doubledot_old = new double[numNodes*dim];
	// start with a zero global force vector
	for (int i = 0; i < numNodes*dim; i++){
		f[i] = 0;
	}
}
void Geometry::make_K_matrix(){
	// Assemble the element stiffness/mass matrices (on GPU or CPU), then
	// the global system, and finally scatter the pseudo (interaction)
	// forces into the global force vector f.
	std::clock_t t_local_start = std::clock();
	bool use_gpu = get_cuda_use();
	std::clock_t t_actual_start = std::clock();
	std::clock_t t_global_start = std::clock();
	if (use_gpu){
		// GPU path: per-element matrices assembled by the CUDA kernels
		if (dim == 2){
			Linear2DBarycentric_B_CUDA_host();
		}
		else if (dim == 3){
			Linear3DBarycentric_B_CUDA_host();
		}
	}
	else{
		// CPU path: build each element matrix from barycentric coordinates
		for (int e = 0; e < numE; e++) {
			if (dim == 2){
				AssembleLocalElementMatrixBarycentric2D(e, nodesInElem[e], displaceInElem, x, y, dim, E[e], M[e], Poisson, Young, thickness);
			}
			else if (dim == 3){
				AssembleLocalElementMatrixBarycentric3D(nodesInElem[e], x, y, z, dim, E[e], Poisson, Young, thickness);
			}
		}
	}
	double t_local = (std::clock() - t_local_start) / (double)CLOCKS_PER_SEC;   // timing only (not reported)
	if (!use_gpu)
		AssembleGlobalElementMatrixBarycentric(numNodes*dim, numE, numNodesPerElem, nodesInElem, E, M, h_A_dense, h_M_dense, displaceInElem);
	double t_global = (std::clock() - t_global_start) / (double)CLOCKS_PER_SEC; // timing only (not reported)
	// Scatter the pseudo (user-interaction) forces into f
	ApplySudoForcesBarycentric(numNodes*dim, sudo_node_force, localcoordForce, elemForce, sudo_force_x, sudo_force_y, f, nodesInElem, thickness, x, y, displaceInElem);
}
//void Geometry::call_sudo_force_func(void){
//
// //call this to apply the sudo forces
// //ApplySudoForcesBarycentric(numNodes*dim, sudo_node_force, localcoordForce, elemForce, sudo_force_x, sudo_force_y, f, nodesInElem, thickness, x, y, displaceInElem, force_reset);
//
//}
void Geometry::AssembleGlobalElementMatrixBarycentric(int numP, int numE, int nodesPerElem, int **elem, double ***E,double ***M, float *K, double *global_M, int **displaceInElem){
	// Scatter the per-element stiffness (E) and mass (M) matrices into the
	// dense global matrices K and global_M (column-major via IDX2C), and
	// simultaneously build the Newmark effective matrix
	//   L = (dt*c_xi*beta_1 + dt^2*beta_2/2)*K + (1 + dt*beta_1*c_alpha)*M.
	//
	// numP           - total number of DOFs (numNodes*dim)
	// elem           - element connectivity (node ids per element)
	// displaceInElem - per-node global DOF numbers
	// Zero the global matrices
	for (int j = 0; j < numP; j++){
		for (int i = 0; i < numP; i++){
			K[IDX2C(j, i, numP)] = 0;
			L[IDX2C(j, i, numP)] =0;
			global_M[IDX2C(j, i, numP)] = 0;
		}
	}
	int dummy_node;
	int row;
	int *DOF = new int[numNodes*dim];
	int counter;
	for (int k = 0; k < numE; k++){
		// Gather the global DOF numbers of this element's nodes
		counter = 0;
		for (int npe = 0; npe < numNodesPerElem; npe++){
			dummy_node = elem[k][npe];
			for (int dof = 0; dof < dim; dof++){
				row = displaceInElem[dummy_node][dof];
				DOF[counter] = row;
				counter++;
			}
		}
		// Accumulate this element's contribution into K, M and L
		for (int c = 0; c < numNodesPerElem*dim; c++){
			for (int r = 0; r < numNodesPerElem*dim; r++){
				K[IDX2C(DOF[c], DOF[r], numP)] = K[IDX2C(DOF[c], DOF[r], numP)] + E[k][r][c];
				global_M[IDX2C(DOF[c], DOF[r], numP)] = global_M[IDX2C(DOF[c], DOF[r], numP)] + M[k][r][c];
				L[IDX2C(DOF[c], DOF[r], numP)] = (dt*c_xi*beta_1+dt*dt*beta_2 / 2.0)*K[IDX2C(DOF[c], DOF[r], numP)] + (1+dt*beta_1*c_alpha)*global_M[IDX2C(DOF[c], DOF[r], numP)];
			}
		}
	}
	delete[] DOF;  // bug fix: DOF was leaked on every assembly call
}
void Geometry::find_b(){
	// Builds the right-hand side of the Newmark system:
	//   b_rhs = f - [ K * u_pred + (c_xi*K + c_alpha*M) * v_pred ]
	// where u_pred / v_pred are the displacement and velocity predictors.
	// K lives in h_A_dense and M in h_M_dense (column-major via IDX2C).
	const int ndof = numNodes * dim;
	for (int row = 0; row < ndof; row++){
		double accum = 0;
		for (int col = 0; col < ndof; col++){
			// Newmark displacement predictor for dof `col`.
			double disp_pred = u[col] + dt*u_dot[col]
				+ (dt*dt / 2.0)*(1.0 - beta_2)*u_doubledot[col];
			// Newmark velocity predictor for dof `col`.
			double vel_pred = u_dot[col] + dt*(1.0 - beta_1)*u_doubledot[col];
			accum += h_A_dense[IDX2C(row, col, ndof)] * disp_pred
				+ (h_A_dense[IDX2C(row, col, ndof)] * c_xi + h_M_dense[IDX2C(row, col, ndof)] * c_alpha) * vel_pred;
		}
		b_rhs[row] = f[row] - accum;
		if (f[row] > 0){
			std::cout << "f" << std::endl;
		}
	}
}
//initializing the dynamic array
// Records how many nodes are constrained to zero displacement and allocates
// the index list that will hold their node numbers.
void Geometry::initialize_zerovector(int numberofpoints){
	numNodesZero = numberofpoints;
	vector_zero_nodes = new int[numberofpoints];
}
void Geometry::initialize_dynamic(){
	// Zero every Newmark state vector (displacement, velocity, acceleration,
	// previous-step acceleration) for all degrees of freedom.
	const int ndof = numNodes * dim;
	for (int dof = 0; dof < ndof; dof++){
		u[dof] = 0.0;
		u_dot[dof] = 0.0;
		u_doubledot[dof] = 0.0;
		u_doubledot_old[dof] = 0.0;
	}
}
void Geometry::update_dynamic_vectors(){
	// Newmark update of the kinematic vectors from the old/new accelerations.
	// NOTE(review): u[] is *overwritten* with the step increment (there is no
	// "+ u[i]" term); update_dynamic_xyz() later adds u onto the coordinates.
	// Confirm this increment-based scheme is intentional.
	for (int dof = 0; dof < numNodes*dim; dof++){
		double acc_blend_u = (1 - beta_2)*u_doubledot_old[dof] + beta_2*u_doubledot[dof];
		double acc_blend_v = (1 - beta_1)*u_doubledot_old[dof] + beta_1*u_doubledot[dof];
		// u must be computed from the *old* velocity, so update it first.
		u[dof] = dt*u_dot[dof] + (dt*dt / 2.0)*acc_blend_u;
		u_dot[dof] += dt*acc_blend_v;
	}
}
void Geometry::update_dynamic_xyz(){
	// Zero the displacement increment on all constrained nodes, then advance
	// the nodal coordinates by the increment stored in u[].
	for (int k = 0; k < numNodesZero; k++){
		int fixed_node = vector_zero_nodes[k];
		u[displaceInElem[fixed_node][0]] = 0;
		u[displaceInElem[fixed_node][1]] = 0;
	}
	for (int node = 0; node < numNodes; node++){
		x[node] += u[node * dim];
		y[node] += u[node * dim + 1];
	}
}
void Geometry::update_vector(){ //solve Ax=b for the dynamics case
	// Solves  L_sys * a_new = b_rhs  for the new acceleration vector on the GPU:
	//   1. upload the dense system matrix (host array L) and convert to CSR,
	//   2. factorize A ~= L*L' in place with incomplete Cholesky (csric02),
	//   3. triangular solves L*z = b then L'*y = z,
	//   4. download y and commit it as u_doubledot (previous value is kept in
	//      u_doubledot_old for the Newmark update).
	// Uses single precision on the device; b_rhs (double) is narrowed to float.
	double duration_K;
	this->set_zero_AxB();
	// --- Upload the dense system matrix to the device
	gpuErrchk(hipMemcpy(d_A_dense, L, Nrows * Ncols * sizeof(*d_A_dense), hipMemcpyHostToDevice));
	// --- Per-row nonzero counts, needed for the dense -> CSR conversion
	cusparseSafeCall(hipsparseSnnz(handle, HIPSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, &nnz));
	gpuErrchk(hipMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), hipMemcpyDeviceToHost));
	// --- CSR value / column-index arrays.
	// NOTE(review): d_A and d_A_ColIndices are hipMalloc'ed on every call and
	// are not hipFree'd in this function; unless another owner releases them
	// this leaks device memory once per call -- confirm ownership and fix.
	gpuErrchk(hipMalloc(&d_A, nnz * sizeof(*d_A)));
	gpuErrchk(hipMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)));
	cusparseSafeCall(hipsparseSdense2csr(handle, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices));
	std::clock_t start_K;
	start_K = std::clock();
	// --- Host-side CSR mirror (diagnostic only; not used by the solve)
	float *h_A = (float *)malloc(nnz * sizeof(*h_A));
	int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices));
	int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices));
	gpuErrchk(hipMemcpy(h_A, d_A, nnz*sizeof(*h_A), hipMemcpyDeviceToHost));
	gpuErrchk(hipMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), hipMemcpyDeviceToHost));
	gpuErrchk(hipMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), hipMemcpyDeviceToHost));
	std::cout << nnz << std::endl;
	// --- Right-hand side: b_rhs (double) narrowed into a float staging buffer
	float *h_x = (float *)malloc(Nrows * sizeof(float));
	for (int i = 0; i < N; i++){
		h_x[i] = b_rhs[i];
	}
	float *d_x; gpuErrchk(hipMalloc(&d_x, Nrows * sizeof(float)));
	gpuErrchk(hipMemcpy(d_x, h_x, Nrows * sizeof(float), hipMemcpyHostToDevice));
	/********************************************************************************************************/
	/* STEP 2: QUERY HOW MUCH MEMORY USED IN CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
	/********************************************************************************************************/
	int pBufferSize_M, pBufferSize_L, pBufferSize_Lt;
	cusparseSafeCall(hipsparseScsric02_bufferSize(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, &pBufferSize_M));
	cusparseSafeCall(hipsparseScsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, &pBufferSize_L));
	cusparseSafeCall(hipsparseScsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, &pBufferSize_Lt));
	// One shared workspace sized for the largest of the three phases.
	int pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_Lt));
	void *pBuffer = 0; gpuErrchk(hipMalloc((void**)&pBuffer, pBufferSize));
	/******************************************************************************************************/
	/* STEP 3: ANALYZE THE THREE PROBLEMS: CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
	/******************************************************************************************************/
	int structural_zero;
	cusparseSafeCall(hipsparseScsric02_analysis(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	hipsparseStatus_t status = hipsparseXcsric02_zeroPivot(handle, info_A, &structural_zero);
	if (HIPSPARSE_STATUS_ZERO_PIVOT == status){ printf("A(%d,%d) is missing\n", structural_zero, structural_zero); }
	cusparseSafeCall(hipsparseScsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	cusparseSafeCall(hipsparseScsrsv2_analysis(handle, HIPSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
	/*************************************/
	/* STEP 4: FACTORIZATION: A = L * L' */
	/*************************************/
	int numerical_zero;
	cusparseSafeCall(hipsparseScsric02(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	status = hipsparseXcsric02_zeroPivot(handle, info_A, &numerical_zero);
	gpuErrchk(hipMemcpy(h_A, d_A, nnz * sizeof(float), hipMemcpyDeviceToHost));
	// Write the factor back into d_A_dense (diagnostic / downstream use).
	cusparseSafeCall(hipsparseScsr2dense(handle, Nrows, Ncols, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_A_dense, Nrows));
	/*********************/
	/* STEP 5: L * z = x */
	/*********************/
	float *d_z; gpuErrchk(hipMalloc(&d_z, N * sizeof(float)));
	const float alpha = 1.;
	cusparseSafeCall(hipsparseScsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, d_x, d_z, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	/**********************/
	/* STEP 6: L' * y = z */
	/**********************/
	float *d_y; gpuErrchk(hipMalloc(&d_y, Ncols * sizeof(float)));
	cusparseSafeCall(hipsparseScsrsv2_solve(handle, HIPSPARSE_OPERATION_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, d_z, d_y, HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
	// Blocking copy: reuse h_x as the result staging buffer.
	hipMemcpy(h_x, d_y, N * sizeof(float), hipMemcpyDeviceToHost);
	printf("\n\nFinal result\n");
	// Commit the solution as the new acceleration; keep the previous one for
	// the Newmark velocity/displacement update.
	for (int i = 0; i < numNodes*dim; i++) {
		u_doubledot_old[i] = u_doubledot[i];
		u_doubledot[i] = h_x[i];
	}
	free(h_A);
	free(h_A_RowIndices);
	free(h_A_ColIndices);
	free(h_x); // FIX: was leaked (the free() call had been commented out); h_x is local-only
	hipFree(d_x);
	hipFree(pBuffer);
	hipFree(d_z);
	hipFree(d_y);
	duration_K = (std::clock() - start_K) / (double)CLOCKS_PER_SEC;
}
void Geometry::Linear2DBarycentric_B(int *nodes, double *x, double *y, double **term){
	// Fills `term` (3 x 6) with the strain-displacement matrix B of a linear
	// triangle in barycentric form, with every entry divided by
	// J = Linear2DJacobianDet_Barycentric (twice the signed element area).
	double J = Linear2DJacobianDet_Barycentric(nodes, x, y);
	// Signed coordinate differences between the triangle's vertices.
	double y23 = y[nodes[1]] - y[nodes[2]];
	double y31 = y[nodes[2]] - y[nodes[0]];
	double y12 = y[nodes[0]] - y[nodes[1]];
	double x32 = x[nodes[2]] - x[nodes[1]];
	double x13 = x[nodes[0]] - x[nodes[2]];
	double x21 = x[nodes[1]] - x[nodes[0]];
	// Zero the matrix, then set only the structurally nonzero entries.
	for (int r = 0; r < 3; r++){
		for (int c = 0; c < 6; c++){
			term[r][c] = 0;
		}
	}
	term[0][0] = term[2][1] = y23 / J;
	term[0][2] = term[2][3] = y31 / J;
	term[0][4] = term[2][5] = y12 / J;
	term[1][1] = term[2][0] = x32 / J;
	term[1][3] = term[2][2] = x13 / J;
	term[1][5] = term[2][4] = x21 / J;
}
// Fills `term` (6 x 12) with the strain-displacement matrix B of a linear
// tetrahedron in barycentric form. Rows correspond to the six strain
// components and columns to the 12 nodal degrees of freedom (3 per node).
// The J_bar entries are the inverse-Jacobian components, and J_star* are the
// fourth node's shape-function derivatives (negative sum of the other three).
void Geometry::Linear3DBarycentric_B(int *nodes, double *x, double *y, double *z, double **term){
	// Edge vectors of nodes 1..3 relative to node 4.
	double x14 = x[nodes[0]] - x[nodes[3]];
	double x24 = x[nodes[1]] - x[nodes[3]];
	double x34 = x[nodes[2]] - x[nodes[3]];
	double y14 = y[nodes[0]] - y[nodes[3]];
	double y24 = y[nodes[1]] - y[nodes[3]];
	double y34 = y[nodes[2]] - y[nodes[3]];
	double z14 = z[nodes[0]] - z[nodes[3]];
	double z24 = z[nodes[1]] - z[nodes[3]];
	double z34 = z[nodes[2]] - z[nodes[3]];
	// Jacobian determinant (6 * signed tetrahedron volume); same formula as
	// Linear3DJacobianDet_Barycentric, recomputed inline here.
	double J = x14*(y24*z34 - y34*z24) - y14*(x24*z34 - z24 * x34) + z14*(x24*y34 - y24*x34);
	// Components of the inverse Jacobian (cofactors divided by J).
	double J_bar11 = (y24*z34 - z24*y34) / J;
	double J_bar12 = (z14*y34 - y14*z34) / J;
	double J_bar13 = (y14*z24 - z14*y24) / J;
	double J_bar21 = (z24*x34 - x24*z34) / J;
	double J_bar22 = (x14*z34 - z14*x34) / J;
	double J_bar23 = (z14*x24 - x14*z24) / J;
	double J_bar31 = (x24*y34 - y24*x34) / J;
	double J_bar32 = (y14*x34 - x14*y34) / J;
	double J_bar33 = (x14*y24 - y14*x24) / J;
	// Node-4 derivatives: barycentric coordinates sum to 1, so its derivative
	// is minus the sum of the other three.
	double J_star1 = -(J_bar11 + J_bar12 + J_bar13);
	double J_star2 = -(J_bar21 + J_bar22 + J_bar23);
	double J_star3 = -(J_bar31 + J_bar32 + J_bar33);
	// Zero the full 6 x 12 matrix, then set the nonzero entries.
	for (int row = 0; row < 6; row++){
		for (int col = 0; col < 12; col++){
			term[row][col] = 0;
		}
	}
	// Columns 0-2: node 1; 3-5: node 2; 6-8: node 3; 9-11: node 4.
	// Within each node block the same placement pattern repeats with the
	// corresponding J_bar / J_star column of derivatives.
	term[0][0] = term[3][1] = term[5][2] = J_bar11;
	term[1][1] = term[3][0] = term[4][2] = J_bar21;
	term[2][2] = term[5][0] = term[4][1] = J_bar31;
	term[0][3] = term[3][4] = term[5][5] = J_bar12;
	term[1][4] = term[3][3] = term[4][5] = J_bar22;
	term[2][5] = term[4][4] = term[5][3] = J_bar32;
	term[0][6] = term[3][7] = term[5][8] = J_bar13;
	term[1][7] = term[3][6] = term[4][8] = J_bar23;
	term[2][8] = term[4][7] = term[5][6] = J_bar33;
	term[0][9] = term[3][10] = term[5][11] = J_star1;
	term[1][10] = term[3][9] = term[4][11] = J_star2;
	term[2][11] = term[4][10] = term[5][9] = J_star3;
}
double Geometry::Linear2DJacobianDet_Barycentric(int *nodes, double *x, double *y){
	// Determinant of the barycentric Jacobian of a linear triangle:
	// equals twice the signed element area.
	double dx13 = x[nodes[0]] - x[nodes[2]];
	double dx23 = x[nodes[1]] - x[nodes[2]];
	double dy13 = y[nodes[0]] - y[nodes[2]];
	double dy23 = y[nodes[1]] - y[nodes[2]];
	return dx13 * dy23 - dy13 * dx23;
}
double Geometry::Linear3DJacobianDet_Barycentric(int *nodes, double *x, double *y, double *z){
	// Determinant of the barycentric Jacobian of a linear tetrahedron
	// (6 * signed volume), computed as the scalar triple product of the
	// edge vectors from node 4 to nodes 1..3.
	double ax = x[nodes[0]] - x[nodes[3]];
	double ay = y[nodes[0]] - y[nodes[3]];
	double az = z[nodes[0]] - z[nodes[3]];
	double bx = x[nodes[1]] - x[nodes[3]];
	double by = y[nodes[1]] - y[nodes[3]];
	double bz = z[nodes[1]] - z[nodes[3]];
	double cx = x[nodes[2]] - x[nodes[3]];
	double cy = y[nodes[2]] - y[nodes[3]];
	double cz = z[nodes[2]] - z[nodes[3]];
	// a . (b x c), written out so the arithmetic matches the original exactly.
	return ax*(by*cz - cy*bz) - ay*(bx*cz - bz*cx) + az*(bx*cy - by*cx);
}
void Geometry::Linear2DBarycentric_D(double **term, double nu, double youngE){
	// Fills `term` (3 x 3) with the isotropic constitutive matrix D WITHOUT the
	// leading youngE/(1-nu^2) factor; that scale is applied downstream (after
	// B^T D B) to limit floating-point error. youngE is accepted but unused here.
	for (int r = 0; r < 3; r++){
		for (int c = 0; c < 3; c++){
			term[r][c] = 0;
		}
	}
#if 1 // plane stress (active variant)
	term[0][0] = term[1][1] = 1.0;
	term[0][1] = term[1][0] = nu;
	term[2][2] = (1 - nu) / 2.0;
#endif
#if 0 // plane strain (disabled)
	// NOTE(review): term[1][1] = nu looks inconsistent with the usual
	// plane-strain D (typically 1-nu on the diagonal) -- verify before enabling.
	term[0][0] = 1.0;
	term[1][1] = nu;
	term[0][1] = term[1][0] = nu;
	term[2][2] = (1.0 - nu) / 2.0;
#endif
}
void Geometry::Linear3DBarycentric_D(double **term, double nu, double youngE){
	// Fills `term` (6 x 6) with the isotropic 3D constitutive matrix D,
	// INCLUDING the youngE / ((1-2nu)(1+nu)) scale (unlike the 2D variant,
	// which defers its scale factor).
	const int size = 6;
	for (int r = 0; r < size; r++){
		for (int c = 0; c < size; c++){
			term[r][c] = 0;
		}
	}
	term[0][0] = term[1][1] = term[2][2] = (1.0 - nu);
	term[0][1] = term[1][0] = term[0][2] = term[2][0] = term[1][2] = term[2][1] = nu;
	term[3][3] = term[4][4] = term[5][5] = (1.0 - nu) / 2.0;
	// Apply the elastic scale in one pass (value identical to scaling per entry).
	double scale = youngE / ((1 - 2 * nu)*(1 + nu));
	for (int r = 0; r < size; r++){
		for (int c = 0; c < size; c++){
			term[r][c] *= scale;
		}
	}
}
void Geometry::AssembleLocalElementMatrixBarycentric2D(int elem_n,int *nodes,int **displaceinE, double *x, double *y, int dimension, double **E,double **M, double nu, double youngE, double thickness)
{
	// Builds, for one linear triangle:
	//   E (6x6) = youngE/(1-nu^2) * thickness * (J/2) * B^T D B   (stiffness)
	//   M (6x6) = consistent mass matrix (dynamic mode only)
	//   global_stress_mises[elem_n] = element von Mises stress (dynamic mode)
	// B is 3x6, D is 3x3 unscaled (see Linear2DBarycentric_D); the elastic
	// scale is applied here after the triple product to limit rounding error.
	int n = 3;
	double **B = new double*[n];
	double **D = new double*[n];
	double **B_TXD = new double*[n * 2];
	double **integrand = new double*[n * 2];
	double **DB = new double*[n];
	double *stress = new double[n];
	for (int i = 0; i < n; i++){
		B[i] = new double[n * 2];
		D[i] = new double[n];
		DB[i] = new double[n * 2];
		stress[i] = 0; // accumulated below in dynamic mode
	}
	for (int i = 0; i <n * 2; i++){
		B_TXD[i] = new double[n];
		integrand[i] = new double[n * 2];
	}
	double J = Linear2DJacobianDet_Barycentric(nodes, x, y); // 2 * signed area
	for (int row = 0; row < n * 2; row++){
		for (int col = 0; col < n; col++){
			B_TXD[row][col] = 0;
		}
	}
	for (int row = 0; row < n; row++){
		for (int col = 0; col < n * 2; col++){
			DB[row][col] = 0;
		}
	}
	for (int row = 0; row < n * 2; row++){
		for (int col = 0; col < n * 2; col++){
			integrand[row][col] = 0;
		}
	}
	Linear2DBarycentric_B(nodes, x, y, B);
	Linear2DBarycentric_D(D, nu, youngE);
	// B_TXD = B^T * D
	for (int row = 0; row < n * 2; row++){
		for (int col = 0; col < n; col++){
			for (int k = 0; k < n; k++){
				B_TXD[row][col] = B_TXD[row][col] + B[k][row] * D[k][col];
			}
		}
	}
	// integrand = (B^T D) * B
	for (int row = 0; row < n * 2; row++){
		for (int col = 0; col < n * 2; col++){
			for (int k = 0; k < n; k++){
				integrand[row][col] = integrand[row][col] + B_TXD[row][k] * B[k][col];
			}
		}
	}
	if (get_dynamic()){
		// DB = D * B (unscaled constitutive times strain-displacement)
		for (int row = 0; row < n; row++){
			for (int col = 0; col < n * 2; col++){
				for (int k = 0; k < n; k++){
					DB[row][col] = DB[row][col] + D[row][k] * B[k][col];
				}
			}
		}
		// stress = D*B*u_e, then apply the plane-stress scale and area weight.
		for (int row = 0; row < n; row++){
			for (int col = 0; col < n; col++){
				for (int k = 0; k < 2; k++){
					stress[row] = stress[row] + DB[row][col * 2 + k] * u[displaceinE[nodes[col]][k]];
				}
			}
			// plane stress; the plane-strain variant (youngE/((1+nu)(1-2nu)))
			// was disabled (#if 0) in the previous revision.
			stress[row] = stress[row] * (youngE / ((1.0 - nu*nu)))*thickness*(J / 2.0);
		}
		global_stress_mises[elem_n] = sqrt((stress[0] + stress[1])*(stress[0] + stress[1]) - 3 * (stress[0] * stress[1] - stress[2] * stress[2]));
	}
	// Scale the stiffness integrand into the output matrix E.
	for (int row = 0; row < n * 2; row++){
		for (int col = 0; col < n * 2; col++){
			E[row][col] = (youngE / ((1.0 - nu*nu)))*thickness*(J / 2.0) * integrand[row][col];
		}
	}
	double A = J / 2; // element area (signed)
	// NOTE(review): density is hard-coded to 1000 here although a `density`
	// member exists elsewhere in this file -- confirm which one is intended.
	double rho = 1000.0;
	if (get_dynamic()){
		M[0][0] = 2 * A*rho*thickness / 3.0;
		M[0][1] = 0.0;
		M[0][2] = A*rho*thickness / 2.0;
		M[0][3] = 0.0;
		M[0][4] = -A*rho*thickness / 6.0;
		M[0][5] = 0.0;
		M[1][0] = 0.0;
		M[1][1] = 2 * A*rho*thickness / 3.0;
		M[1][2] = 0.0;
		M[1][3] = A*rho*thickness / 2.0;
		M[1][4] = 0.0;
		M[1][5] = -A*rho*thickness / 6.0;
		M[2][0] = A*rho*thickness / 2.0;
		M[2][1] = 0.0;
		M[2][2] = 2 * A*rho*thickness / 3.0;
		M[2][3] = 0.0;
		M[2][4] = -A*rho*thickness / 6.0;
		M[2][5] = 0.0;
		M[3][0] = 0.0;
		M[3][1] = A*rho*thickness / 2.0;
		M[3][2] = 0.0;
		M[3][3] = 2.0* A*rho*thickness / 3.0;
		M[3][4] = 0.0;
		M[3][5] = -A*rho*thickness / 6.0;
		M[4][0] = -A*rho*thickness / 6.0;
		M[4][1] = 0.0;
		M[4][2] = -A*rho*thickness / 6.0;
		M[4][3] = 0.0;
		M[4][4] = A*rho*thickness / 3.0;
		M[4][5] = 0.0;
		M[5][0] = 0.0;
		M[5][1] = -A*rho*thickness / 6.0;
		M[5][2] = 0.0;
		M[5][3] = -A*rho*thickness / 6.0;
		M[5][4] = 0.0;
		M[5][5] = A*rho*thickness / 3.0;
	}
	// FIX: these rows were allocated with new[] but released with scalar
	// `delete` (undefined behavior); use delete[].
	for (int i = 0; i < n; i++){
		delete[] B[i];
		delete[] D[i];
		delete[] DB[i];
	}
	for (int i = 0; i < n * 2; i++){
		delete[] B_TXD[i];
		delete[] integrand[i];
	}
	delete[] B;
	delete[] D;
	delete[] B_TXD;
	delete[] integrand;
	delete[] DB;
	delete[] stress;
}
//**************************3D************************************//
//3333333333333333333333333333333333333333333333333333333333333333//
//DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD//
//****************************************************************//
void Geometry::AssembleLocalElementMatrixBarycentric3D(int *nodes, double *x, double *y, double *z, int dimension, double **E, double nu, double youngE, double thickness)
{
	// Builds the 12x12 element stiffness matrix E = (J/6) * B^T D B for a
	// linear tetrahedron. B is 6x12, D is 6x6 and already carries the
	// youngE/((1-2nu)(1+nu)) scale (see Linear3DBarycentric_D).
	// `dimension` and `thickness` are accepted for signature symmetry with the
	// 2D variant but are unused here.
	int multi = 2;
	double **B = new double*[3 * multi];
	double **D = new double*[3 * multi];
	double **B_TXD = new double*[6 * multi];
	double **integrand = new double*[6 * multi];
	for (int i = 0; i < 3 * multi; i++){
		B[i] = new double[6 * multi];
		D[i] = new double[3 * multi];
	}
	for (int i = 0; i < 6 * multi; i++){
		B_TXD[i] = new double[3 * multi];
		integrand[i] = new double[6 * multi];
	}
	double J = Linear3DJacobianDet_Barycentric(nodes, x, y, z); // 6 * signed volume
	for (int row = 0; row < 6 * multi; row++){
		for (int col = 0; col < 3 * multi; col++){
			B_TXD[row][col] = 0;
		}
	}
	for (int row = 0; row < 6 * multi; row++){
		for (int col = 0; col < 6 * multi; col++){
			integrand[row][col] = 0;
		}
	}
	Linear3DBarycentric_B(nodes, x, y, z, B);
	Linear3DBarycentric_D(D, nu, youngE);
	// B_TXD = B^T * D
	for (int row = 0; row < 6 * multi; row++){
		for (int col = 0; col < 3 * multi; col++){
			for (int k = 0; k < 3 * multi; k++){
				B_TXD[row][col] = B_TXD[row][col] + B[k][row] * D[k][col];
			}
		}
	}
	// integrand = (B^T D) * B
	for (int row = 0; row < 6 * multi; row++){
		for (int col = 0; col < 6 * multi; col++){
			for (int k = 0; k < 3 * multi; k++){
				integrand[row][col] = integrand[row][col] + B_TXD[row][k] * B[k][col];
			}
		}
	}
	// Integrate over the element: tetrahedron volume = J/6.
	for (int row = 0; row < 6 * multi; row++){
		for (int col = 0; col < 6 * multi; col++){
			E[row][col] = integrand[row][col] * J / 6.0;
		}
	}
	// FIX: rows were released with scalar `delete` although allocated with
	// new[] (undefined behavior); use delete[].
	for (int i = 0; i < 3 * multi; i++){
		delete[] B[i];
		delete[] D[i];
	}
	for (int i = 0; i < 6 * multi; i++){
		delete[] B_TXD[i];
		delete[] integrand[i];
	}
	delete[] B;
	delete[] D;
	delete[] B_TXD;
	delete[] integrand;
}
//3D
void Geometry::Linear3DBarycentric_B_CUDA_host(){
	// Uploads the current nodal coordinates, zeroes the dense system matrix,
	// and launches the 3D element-assembly kernel (make_K_cuda3d), which
	// accumulates directly into d_A_dense on the device.
	gpuErrchk(hipMemcpy(d_x_dist, x, numNodes*sizeof(*d_x_dist), hipMemcpyHostToDevice));
	gpuErrchk(hipMemcpy(d_y_dist, y, numNodes*sizeof(*d_y_dist), hipMemcpyHostToDevice));
	gpuErrchk(hipMemcpy(d_z_dist, z, numNodes*sizeof(*d_z_dist), hipMemcpyHostToDevice));
	// Clear the dense global matrix before the kernel accumulates into it.
	gpuErrchk(hipMemset(d_A_dense, 0, numNodes*dim*numNodes*dim*sizeof(*d_A_dense)));
	gpuErrchk(hipMemcpy(dev_numNodes, &numNodes, 1 * sizeof(int), hipMemcpyHostToDevice));
	// FIX: the timer previously started *after* the launch and nothing
	// synchronized before reading it (the memcpy that would have blocked was
	// commented out), so the reported duration was meaningless.
	// Launch configuration is hard-coded (192 blocks x 128 threads);
	// TODO(review): derive it from numE.
	std::clock_t cuda_K = std::clock();
	make_K_cuda3d << < 192, 128 >> >(E_vector_device, nodesInElem_device, d_x_dist, d_y_dist, d_z_dist, displaceInElem_device, d_A_dense, dev_numNodes);
	gpuErrchk(hipGetLastError());      // catch launch-configuration errors
	gpuErrchk(hipDeviceSynchronize()); // wait so the timing below is real
	double duration_K = (std::clock() - cuda_K) / (double)CLOCKS_PER_SEC;
	std::cout << "cuda k assmeble: " << duration_K << std::endl;
}
//2D
void Geometry::Linear2DBarycentric_B_CUDA_host(){
	// Uploads nodal coordinates, zeroes the dense system matrix, launches the
	// 2D element-assembly kernel (make_K_cuda2d), and downloads the assembled
	// dense matrix back into h_A_dense.
	gpuErrchk(hipMemcpy(d_x, x, numNodes*sizeof(double), hipMemcpyHostToDevice));
	gpuErrchk(hipMemcpy(d_y, y, numNodes*sizeof(double), hipMemcpyHostToDevice));
	// FIX: hipMemset takes an int byte value; the previous code passed the
	// double literal 0.0 and relied on implicit conversion.
	gpuErrchk(hipMemset(d_A_dense, 0, numNodes*dim*numNodes*dim*sizeof(*d_A_dense)));
	// Launch configuration is hard-coded (147 blocks x 112 threads);
	// TODO(review): derive it from numE instead.
	make_K_cuda2d << <147,112 >> >(E_vector_device, nodesInElem_device, d_x, d_y, displaceInElem_device, d_A_dense, numNodes, thickness, Young, Poisson, c_alpha, beta_1, beta_2, density, dt, c_xi, numE);
	gpuErrchk(hipGetLastError()); // catch launch-configuration errors
	// Blocking copy of the assembled dense matrix back to the host; this also
	// implicitly synchronizes with the kernel above.
	gpuErrchk(hipMemcpy(h_A_dense, d_A_dense, numNodes*dim*numNodes*dim*sizeof(*d_A_dense), hipMemcpyDeviceToHost));
}
// Intentionally empty: placeholder for surface-force assembly (not yet implemented).
void Geometry::make_surface_f(){
}
// Applies an edge traction (forceVec_x, forceVec_y) on one boundary element,
// adding length*thickness/2 of the load to each of the edge's two nodes in
// the global force vector f.
// NOTE(review): this function is in an experimental state -- the loop over all
// numBC conditions is commented out, the local edge index is hard-coded to 1,
// and numBC is written into elemForce[0] and then used as the element index.
// Confirm intent before relying on it for more than a single boundary edge.
// K and localcoord are currently unused (their uses are commented out).
void Geometry::ApplyEssentialBoundaryConditionsBarycentric(int numP, int numBC, int *localcoord, int *elemForce, double forceVec_x, double forceVec_y, double *f, double **K, int **nodesInElem, double thickness, double *x, double *y, int **displaceInElem){
	int local; // used to store local coord info
	int node_interest[2];// use two ints to tell us which 2 of the nodes in the element would be useful
	int row, col;
	int element;
	int node;
	double length; // edge length; set in every reachable branch since local is hard-coded to 1
	double x_1, y_1, x_2, y_2;
	//for (int i = 0; i < numBC; i++){
	// NOTE(review): stores the BC count into the element-index array slot that
	// is read below -- looks like a debugging shortcut; verify.
	elemForce[0] = numBC;
	//local = localcoord[i];
	int i = 0;
	local = 1; // hard-coded: edge opposite barycentric direction xi_2
	if (local == 0){//Opposite to xi_1 direction
		node_interest[0] = 1;
		node_interest[1] = 2;
		x_1 = x[nodesInElem[elemForce[i]][node_interest[0]]];
		y_1 = y[nodesInElem[elemForce[i]][node_interest[0]]];
		x_2 = x[nodesInElem[elemForce[i]][node_interest[1]]];
		y_2 = y[nodesInElem[elemForce[i]][node_interest[1]]];
		length = sqrt(pow(x_1 - x_2, 2.0) + pow(y_1 - y_2, 2.0));
	}
	else if (local == 1){//Opposite to xi_2 direction
		node_interest[0] = 0;
		node_interest[1] = 2;
		x_1 = x[nodesInElem[elemForce[i]][node_interest[0]]];
		y_1 = y[nodesInElem[elemForce[i]][node_interest[0]]];
		x_2 = x[nodesInElem[elemForce[i]][node_interest[1]]];
		y_2 = y[nodesInElem[elemForce[i]][node_interest[1]]];
		length = sqrt(pow(x_1 - x_2, 2.0) + pow(y_1 - y_2, 2.0));
	}
	else if (local == 2){ // Opposite to xi_3 direction
		node_interest[0] = 0;
		node_interest[1] = 1;
		x_1 = x[nodesInElem[elemForce[i]][node_interest[0]]];
		y_1 = y[nodesInElem[elemForce[i]][node_interest[0]]];
		x_2 = x[nodesInElem[elemForce[i]][node_interest[1]]];
		y_2 = y[nodesInElem[elemForce[i]][node_interest[1]]];
		length = sqrt(pow(x_1 - x_2, 2.0) + pow(y_1 - y_2, 2.0));
	}
	//cout << endl << "length: " << length << endl;
	element = elemForce[i];
	// Distribute the edge traction to the two edge nodes, one dof at a time.
	for (int node_c = 0; node_c < 2; node_c++){
		node = nodesInElem[element][node_interest[node_c]];
		for (int dof = 0; dof < 2; dof++){
			row = displaceInElem[node][dof];
			for (int dummy_V = 0; dummy_V < numP; dummy_V++){
				//K[row][dummy_V] = 0;
			}
			//K[row][row] = 1;
			if (dof == 0){
				//f[row] = f[row] + (length*thickness / 2)*forceVec_x[i];
				f[row] = f[row] + (length*thickness / 2)*forceVec_x;
			}
			else if (dof == 1){
				//f[row] = f[row] + (length*thickness / 2)*forceVec_y[i];
				f[row] = f[row] + (length*thickness / 2)*forceVec_y;
			}
		}
	}
	//}
}
void Geometry::ApplySudoForcesBarycentric(int numP, int node_applied, int *localcoord, int *elemForce, double forceVec_x, double forceVec_y, double *g, int **nodesInElem, double thickness, double *x, double *y, int **displaceInElem){
	// Rebuilds the global pseudo-force vector from the four stored pseudo-force
	// nodes (sudo_force_index[0..3]) and their per-slot (x, y) force pairs
	// (sudo_force_value1..4).
	//
	// NOTE(review): this writes the MEMBER vector `f`, not the parameter `g`,
	// and most parameters are unused — confirm the intended contract before
	// relying on `g`.
	for (int dummy_V = 0; dummy_V < numP; dummy_V++){
		f[dummy_V] = 0; // start from a clean force vector on every call
	}
	for (int findex = 0; findex < 4; findex++){
		int node_c = sudo_force_index[findex]; // global node this force acts on
		// Select the (x, y) force pair stored for this pseudo-force slot.
		double f1, f2;
		if (findex == 0){
			f1 = sudo_force_value1[0];
			f2 = sudo_force_value1[1];
		}
		else if (findex == 1){
			f1 = sudo_force_value2[0];
			f2 = sudo_force_value2[1];
		}
		else if (findex == 2){
			f1 = sudo_force_value3[0];
			f2 = sudo_force_value3[1];
		}
		else { // findex == 3
			f1 = sudo_force_value4[0];
			f2 = sudo_force_value4[1];
		}
		// Accumulate into the rows owned by this node's degrees of freedom.
		for (int dof = 0; dof < dim; dof++){
			int row = displaceInElem[node_c][dof];
			if (dof == 0){
				f[row] += f1;
			}
			else if (dof == 1){
				f[row] += f2;
			}
			// dof == 2 (z): intentionally receives no contribution, as before.
		}
	}
}
void Geometry::set_zero_nodes(int *points){
	// Record the node ids that carry zero-displacement boundary conditions;
	// set_zero_AxB() later uses this list to modify L and b_rhs.
	int idx = 0;
	while (idx < numNodesZero){
		vector_zero_nodes[idx] = points[idx];
		++idx;
	}
}
void Geometry::set_zero_AxB(void){
	// Imposes zero-displacement (essential) boundary conditions on the
	// assembled system: for every node in vector_zero_nodes, the node's two
	// DOF rows of the column-major matrix are zeroed, the diagonal is set to 1,
	// and the matching right-hand-side entries are cleared — forcing the
	// solution components at those DOFs to zero.
	//We have to make the corresponding L matrix and rhs vectors set to the correct values.
	int row1, row2;
#if 1
	// Active path: operate on the member matrix L and b_rhs.
	for (int i = 0; i < numNodesZero; i++){
		row1 = displaceInElem[vector_zero_nodes[i]][0];  // x-DOF row of the constrained node
		row2 = displaceInElem[vector_zero_nodes[i]][1];  // y-DOF row of the constrained node
		for (int col = 0; col < Ncols; col++){
			// IDX2C is column-major; this walks the whole row for each DOF.
			L[IDX2C(col, row1, N)] = 0.0;
			L[IDX2C(col, row2, N)] = 0.0;
		}
		// Unit diagonal pins the DOF to the (zeroed) RHS value.
		L[IDX2C(row1, row1, N)] = 1.0;
		L[IDX2C(row2, row2, N)] = 1.0;
		b_rhs[row1] = 0.0;
		b_rhs[row2] = 0.0;
	}
#else
	// Disabled alternative: same procedure applied to h_A_dense instead of L.
	for (int i = 0; i < numNodesZero; i++){
		row1 = displaceInElem[vector_zero_nodes[i]][0];
		row2 = displaceInElem[vector_zero_nodes[i]][1];
		for (int col = 0; col < Ncols; col++){
			h_A_dense[IDX2C(col, row1, N)] = 0.0;
			h_A_dense[IDX2C(col, row2, N)] = 0.0;
		}
		h_A_dense[IDX2C(row1, row1, N)] = 1.0;
		h_A_dense[IDX2C(row2, row2, N)] = 1.0;
		/*f[row1] = 0.0;
		f[row2] = 0.0;*/
		b_rhs[row1] = 0.0;
		b_rhs[row2] = 0.0;
	}
#endif // 0
#if 0
	// Disabled experiment: prescribe a nonzero displacement at the first
	// pseudo-force node via a Newmark-style RHS (note beta_2, dt, u_dot).
	row1 = displaceInElem[sudo_force_index[0]][0];
	row2 = displaceInElem[sudo_force_index[0]][1];
	for (int col = 0; col < Ncols; col++){
		L[IDX2C(col, row1, N)] = 0.0;
		L[IDX2C(col, row2, N)] = 0.0;
	}
	L[IDX2C(row1, row1, N)] = 1.0;
	L[IDX2C(row2, row2, N)] = 1.0;
	b_rhs[row1] = (1.0 / beta_2)*((sudo_force_value1[0] - dt*u_dot[row1]) - (1.0 - beta_2)*u_doubledot_old[row1]);
	b_rhs[row2] = (1.0 / beta_2)*((sudo_force_value1[1] - dt*u_dot[row2]) - (1.0 - beta_2)*u_doubledot_old[row2]);;
#endif // 0
}
void Geometry::initialize_CUDA(void){
	// One-time setup of the cuSPARSE handle, matrix descriptors, solver infos
	// and the persistent device buffers reused by tt() on every solve.
	Nrows = numNodes*dim; // --- Number of rows
	Ncols = numNodes*dim; // --- Number of columns
	N = Nrows;
	cusparseSafeCall(hipsparseCreate(&handle));
	//h_A_dense = (float*)malloc(Nrows*Ncols*sizeof(*h_A_dense));
	// Descriptor for the general system matrix A (one-based indexing to match
	// the rest of the pipeline).
	cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
	cusparseSafeCall(hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL));
	cusparseSafeCall(hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE));
	nnz = 0; // --- Number of nonzero elements in dense matrix
	lda = Nrows; // --- Leading dimension of dense matrix
	gpuErrchk(hipMalloc(&d_nnzPerVector, Nrows * sizeof(*d_nnzPerVector)));
	h_nnzPerVector = (int *)malloc(Nrows * sizeof(*h_nnzPerVector));
	//device side dense matrix
	gpuErrchk(hipMalloc(&d_A_RowIndices, (Nrows + 1) * sizeof(*d_A_RowIndices)));
	// BUG FIX: use sizeof(*dev_numNodes) (one element), not
	// sizeof(dev_numNodes) (the size of the pointer). The old code copied
	// 8 bytes out of the 4-byte int `numNodes`, reading past its storage.
	hipMalloc((void **)&dev_numNodes, sizeof(*dev_numNodes));
	hipMemcpy(dev_numNodes, &numNodes, sizeof(*dev_numNodes), hipMemcpyHostToDevice);
	//hipMemcpy(&numNodes,dev_numNodes , sizeof(dev_numNodes), hipMemcpyDeviceToHost);
	// Descriptor for the lower-triangular incomplete-Cholesky factor L.
	cusparseSafeCall(hipsparseCreateMatDescr(&descr_L));
	cusparseSafeCall(hipsparseSetMatIndexBase(descr_L, HIPSPARSE_INDEX_BASE_ONE));
	cusparseSafeCall(hipsparseSetMatType(descr_L, HIPSPARSE_MATRIX_TYPE_GENERAL));
	cusparseSafeCall(hipsparseSetMatFillMode(descr_L, HIPSPARSE_FILL_MODE_LOWER));
	cusparseSafeCall(hipsparseSetMatDiagType(descr_L, HIPSPARSE_DIAG_TYPE_NON_UNIT));
	// Analysis/info objects for the ic0 factorization and the two triangular
	// solves (L and L-transpose) performed in tt().
	cusparseSafeCall(hipsparseCreateCsric02Info(&info_A));
	cusparseSafeCall(hipsparseCreateCsrsv2Info(&info_L));
	cusparseSafeCall(hipsparseCreateCsrsv2Info(&info_Lt));
}
int Geometry::tt()
{
	// Solves the assembled linear system A u = f on the GPU via cuSPARSE:
	// dense -> CSR conversion, incomplete Cholesky (ic0) factorization
	// A ~ L*L', then two triangular solves (L z = f, L' y = z), and finally
	// adds the displacement solution onto the node coordinates x/y(/z).
	// Requires initialize_CUDA() to have been called once beforehand.
	// Returns 0 on completion (GPU errors are reported via the *SafeCall /
	// gpuErrchk wrappers).
	double duration_K;
	// --- Column-major ordering
	/*h_A_dense[0] = 0.4612f; h_A_dense[4] = -0.0006f; h_A_dense[8] = 0.3566f; h_A_dense[12] = 0.0f;
	h_A_dense[1] = -0.0006f; h_A_dense[5] = 0.4640f; h_A_dense[9] = -1000.0723f; h_A_dense[13] = 0.0f;
	h_A_dense[2] = 0.3566f; h_A_dense[6] = 0.0723f; h_A_dense[10] = 100.7543f; h_A_dense[14] = 0.0f;
	h_A_dense[3] = 0.f; h_A_dense[7] = 0.0f; h_A_dense[11] = 0.0f; h_A_dense[15] = 0.1f;
	*/
	// CPU-assembly path: apply the essential BCs on the host copy of A and
	// upload it. (In the CUDA-assembly path d_A_dense is already populated.)
	if (!cuda_use){
		for (int col = 0; col < Ncols; col++){
			h_A_dense[IDX2C(col, 0, N)] = 0;
			h_A_dense[IDX2C(col, 1, N)] = 0;
			if (dim == 3){
				h_A_dense[IDX2C(col, 2, N)] = 0;
			}
		}
		h_A_dense[IDX2C(0, 0, N)] = 1.0;
		h_A_dense[IDX2C(1, 1, N)] = 1.0;
		if (dim == 3){
			h_A_dense[IDX2C(2, 2, N)] = 1.0;
		}
		set_zero_AxB();
		gpuErrchk(hipMemcpy(d_A_dense, h_A_dense, Nrows * Ncols * sizeof(*d_A_dense), hipMemcpyHostToDevice));
	}
#if 0
	std::ofstream writenodes("global_K.txt");
	for (int j = 0; j < N; j++){
		for (int i = 0; i < N; i++){
			writenodes << h_A_dense[IDX2C(j, i, N)] << " ";
		}
		writenodes << std::endl;
	}
	writenodes.close();
#endif // 0
	// --- Count nonzeros per row, then convert the dense matrix to CSR.
	cusparseSafeCall(hipsparseSnnz(handle, HIPSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, &nnz));
	gpuErrchk(hipMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), hipMemcpyDeviceToHost));
	// NOTE(review): d_A and d_A_ColIndices are re-allocated on every call
	// without freeing the previous allocation (only the destructor frees the
	// last one) — device memory leaks if tt() is called repeatedly; confirm
	// and consider freeing/reusing these buffers.
	gpuErrchk(hipMalloc(&d_A, nnz * sizeof(*d_A)));
	gpuErrchk(hipMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)));
	cusparseSafeCall(hipsparseSdense2csr(handle, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices));
	std::clock_t start_K;
	start_K = std::clock();
	// Host-side mirror of the CSR matrix (used only for inspection below).
	float *h_A = (float *)malloc(nnz * sizeof(*h_A));
	int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices));
	int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices));
	gpuErrchk(hipMemcpy(h_A, d_A, nnz*sizeof(*h_A), hipMemcpyDeviceToHost));
	gpuErrchk(hipMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), hipMemcpyDeviceToHost));
	gpuErrchk(hipMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), hipMemcpyDeviceToHost));
	std::cout << nnz << std::endl;
	// --- Build the RHS vector from the member force vector f, with the first
	// node's DOFs pinned to zero, and upload it.
	float *h_x = (float *)malloc(Nrows * sizeof(float));
	for (int i = 0; i < N; i++){
		h_x[i] = f[i];
	}
	if (dim == 3){
		h_x[0] = h_x[1] = h_x[2] = 0;
	}
	else {
		h_x[0] = h_x[1] = 0;
	}
	float *d_x; gpuErrchk(hipMalloc(&d_x, Nrows * sizeof(float)));
	gpuErrchk(hipMemcpy(d_x, h_x, Nrows * sizeof(float), hipMemcpyHostToDevice));
	/********************************************************************************************************/
	/* STEP 2: QUERY HOW MUCH MEMORY USED IN CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
	/********************************************************************************************************/
	int pBufferSize_M, pBufferSize_L, pBufferSize_Lt;
	cusparseSafeCall(hipsparseScsric02_bufferSize(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, &pBufferSize_M));
	cusparseSafeCall(hipsparseScsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, &pBufferSize_L));
	cusparseSafeCall(hipsparseScsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, &pBufferSize_Lt));
	// One scratch buffer sized for the largest of the three phases.
	int pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_Lt));
	void *pBuffer = 0; gpuErrchk(hipMalloc((void**)&pBuffer, pBufferSize));
	/******************************************************************************************************/
	/* STEP 3: ANALYZE THE THREE PROBLEMS: CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
	/******************************************************************************************************/
	int structural_zero;
	cusparseSafeCall(hipsparseScsric02_analysis(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	hipsparseStatus_t status = hipsparseXcsric02_zeroPivot(handle, info_A, &structural_zero);
	if (HIPSPARSE_STATUS_ZERO_PIVOT == status){ printf("A(%d,%d) is missing\n", structural_zero, structural_zero); }
	cusparseSafeCall(hipsparseScsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	cusparseSafeCall(hipsparseScsrsv2_analysis(handle, HIPSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
	/*************************************/
	/* STEP 4: FACTORIZATION: A = L * L' */
	/*************************************/
	int numerical_zero;
	// In-place ic0: d_A now holds the factor L (values only; same pattern).
	cusparseSafeCall(hipsparseScsric02(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	status = hipsparseXcsric02_zeroPivot(handle, info_A, &numerical_zero);
	/*if (HIPSPARSE_STATUS_ZERO_PIVOT == status){ printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero); }
	*/
	gpuErrchk(hipMemcpy(h_A, d_A, nnz * sizeof(float), hipMemcpyDeviceToHost));
	/*printf("\nNon-zero elements in Cholesky matrix\n\n");
	for (int k = 0; k<10; k++) printf("%f\n", h_A[k]);*/
	cusparseSafeCall(hipsparseScsr2dense(handle, Nrows, Ncols, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_A_dense, Nrows));
	/*********************/
	/* STEP 5: L * z = x */
	/*********************/
	// --- Allocating the intermediate result vector
	float *d_z; gpuErrchk(hipMalloc(&d_z, N * sizeof(float)));
	const float alpha = 1.;
	cusparseSafeCall(hipsparseScsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, d_x, d_z, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	/**********************/
	/* STEP 5: L' * y = z */
	/**********************/
	// --- Allocating the host and device side result vector
	float *h_y = (float *)malloc(Ncols * sizeof(float));
	float *d_y; gpuErrchk(hipMalloc(&d_y, Ncols * sizeof(float)));
	cusparseSafeCall(hipsparseScsrsv2_solve(handle, HIPSPARSE_OPERATION_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, d_z, d_y, HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
	// Pull the displacement solution back and advect the node coordinates.
	// (Blocking hipMemcpy also synchronizes with the preceding solves.)
	hipMemcpy(h_x, d_y, N * sizeof(float), hipMemcpyDeviceToHost);
	printf("\n\nFinal result\n");
	for (int i = 0; i < numNodes; i++) {
		x[i] = x[i] + h_x[i * dim];
		y[i] = y[i] + h_x[i * dim + 1];
		if (dim == 3){
			z[i] = z[i] + h_x[i * dim + 2];
		}
	}
	free(h_A);
	free(h_A_RowIndices);
	free(h_A_ColIndices);
	// BUG FIX: h_x was malloc'd above but its free() was commented out,
	// leaking Nrows floats on every solve.
	free(h_x);
	free(h_y);
	hipFree(d_x);
	hipFree(pBuffer);
	hipFree(d_z);
	hipFree(d_y);
	duration_K = (std::clock() - start_K) / (double)CLOCKS_PER_SEC;
	//std::cout << "FPS time: " <<1/duration_K << std::endl;
	//std::cout << "Duration: " << duration_K << std::endl;
	return 0;
}
//int Geometry::tt()
//{
// // --- Initialize cuSPARSE
// hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
//
// const int Nrows = numNodes*dim; // --- Number of rows
// const int Ncols = numNodes*dim; // --- Number of columns
// const int N = Nrows;
//
// // --- Host side dense matrix
// double *h_A_dense = (double*)malloc(Nrows*Ncols*sizeof(*h_A_dense));
//
// // --- Column-major ordering
// /*h_A_dense[0] = 0.4612f; h_A_dense[4] = -0.0006f; h_A_dense[8] = 0.3566f; h_A_dense[12] = 0.0f;
// h_A_dense[1] = -0.0006f; h_A_dense[5] = 0.4640f; h_A_dense[9] = -1000.0723f; h_A_dense[13] = 0.0f;
// h_A_dense[2] = 0.3566f; h_A_dense[6] = 0.0723f; h_A_dense[10] = 100.7543f; h_A_dense[14] = 0.0f;
// h_A_dense[3] = 0.f; h_A_dense[7] = 0.0f; h_A_dense[11] = 0.0f; h_A_dense[15] = 0.1f;
// */
// for (int col = 0; col < Ncols; col++){
// for (int row = 0; row < Nrows; row++){
//
// h_A_dense[IDX2C(col, row, N)] = K[col][row];
// //a[IDX2C(col, row, n)] = (double)ind++;
// //h_A_dense[IDX2C(col, row, N)] = 0;
//
//
// }
//
// }
// for (int col = 0; col < Ncols; col++){
//
// h_A_dense[IDX2C(col, 0, N)] = 0;
// h_A_dense[IDX2C(col, 1, N)] = 0;
// }
// h_A_dense[IDX2C(0, 0, N)] = 1;
// h_A_dense[IDX2C(1, 1, N)] = 1;
//
//
// // --- Create device array and copy host array to it
// double *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, Nrows * Ncols * sizeof(*d_A_dense)));
// gpuErrchk(hipMemcpy(d_A_dense, h_A_dense, Nrows * Ncols * sizeof(*d_A_dense), hipMemcpyHostToDevice));
//
// // --- Descriptor for sparse matrix A
// hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
// cusparseSafeCall(hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL));
// cusparseSafeCall(hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE));
//
// int nnz = 0; // --- Number of nonzero elements in dense matrix
// const int lda = Nrows; // --- Leading dimension of dense matrix
// // --- Device side number of nonzero elements per row
// int *d_nnzPerVector; gpuErrchk(hipMalloc(&d_nnzPerVector, Nrows * sizeof(*d_nnzPerVector)));
// cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, &nnz));
// // --- Host side number of nonzero elements per row
// int *h_nnzPerVector = (int *)malloc(Nrows * sizeof(*h_nnzPerVector));
// gpuErrchk(hipMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), hipMemcpyDeviceToHost));
//
// /*printf("Number of nonzero elements in dense matrix = %i\n\n", nnz);
// for (int i = 0; i < 10; ++i) printf("Number of nonzero elements in row %i = %i \n", i, h_nnzPerVector[i]);
// printf("\n");*/
//
// // --- Device side dense matrix
// double *d_A; gpuErrchk(hipMalloc(&d_A, nnz * sizeof(*d_A)));
// int *d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (Nrows + 1) * sizeof(*d_A_RowIndices)));
// int *d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)));
//
// cusparseSafeCall(hipsparseDdense2csr(handle, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices));
//
// // --- Host side dense matrix
// double *h_A = (double *)malloc(nnz * sizeof(*h_A));
// int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices));
// int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices));
// gpuErrchk(hipMemcpy(h_A, d_A, nnz*sizeof(*h_A), hipMemcpyDeviceToHost));
// gpuErrchk(hipMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), hipMemcpyDeviceToHost));
// gpuErrchk(hipMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), hipMemcpyDeviceToHost));
//
// /*printf("\nOriginal matrix in CSR format\n\n");
// for (int i = 0; i < 10; ++i) printf("A[%i] = %.0f ", i, h_A[i]); printf("\n");
//
// printf("\n");
// for (int i = 0; i < (10 + 1); ++i) printf("h_A_RowIndices[%i] = %i \n", i, h_A_RowIndices[i]); printf("\n");
//
// for (int i = 0; i < 10; ++i) printf("h_A_ColIndices[%i] = %i \n", i, h_A_ColIndices[i]);
// */
// // --- Allocating and defining dense host and device data vectors
// double *h_x = (double *)malloc(Nrows * sizeof(double));
// /*h_x[0] = 100.0; h_x[1] = 200.0; h_x[2] = 400.0; h_x[3] = 500.0;*/
// for (int i = 0; i < N; i++){
// h_x[i] = f[i];
// }
// h_x[0] = h_x[1] = 0;
//
// double *d_x; gpuErrchk(hipMalloc(&d_x, Nrows * sizeof(double)));
// gpuErrchk(hipMemcpy(d_x, h_x, Nrows * sizeof(double), hipMemcpyHostToDevice));
//
// /******************************************/
// /* STEP 1: CREATE DESCRIPTORS FOR L AND U */
// /******************************************/
// hipsparseMatDescr_t descr_L = 0;
// cusparseSafeCall(hipsparseCreateMatDescr(&descr_L));
// cusparseSafeCall(hipsparseSetMatIndexBase(descr_L, HIPSPARSE_INDEX_BASE_ONE));
// cusparseSafeCall(hipsparseSetMatType(descr_L, HIPSPARSE_MATRIX_TYPE_GENERAL));
// cusparseSafeCall(hipsparseSetMatFillMode(descr_L, HIPSPARSE_FILL_MODE_LOWER));
// cusparseSafeCall(hipsparseSetMatDiagType(descr_L, HIPSPARSE_DIAG_TYPE_NON_UNIT));
//
// /********************************************************************************************************/
// /* STEP 2: QUERY HOW MUCH MEMORY USED IN CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
// /********************************************************************************************************/
// csric02Info_t info_A = 0; cusparseSafeCall(hipsparseCreateCsric02Info(&info_A));
// csrsv2Info_t info_L = 0; cusparseSafeCall(hipsparseCreateCsrsv2Info(&info_L));
// csrsv2Info_t info_Lt = 0; cusparseSafeCall(hipsparseCreateCsrsv2Info(&info_Lt));
//
// int pBufferSize_M, pBufferSize_L, pBufferSize_Lt;
// cusparseSafeCall(hipsparseDcsric02_bufferSize(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, &pBufferSize_M));
// cusparseSafeCall(hipsparseDcsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, &pBufferSize_L));
// cusparseSafeCall(hipsparseDcsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, &pBufferSize_Lt));
//
// int pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_Lt));
// void *pBuffer = 0; gpuErrchk(hipMalloc((void**)&pBuffer, pBufferSize));
//
// /******************************************************************************************************/
// /* STEP 3: ANALYZE THE THREE PROBLEMS: CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
// /******************************************************************************************************/
// int structural_zero;
//
// cusparseSafeCall(hipsparseDcsric02_analysis(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
//
// hipsparseStatus_t status = hipsparseXcsric02_zeroPivot(handle, info_A, &structural_zero);
// if (HIPSPARSE_STATUS_ZERO_PIVOT == status){ printf("A(%d,%d) is missing\n", structural_zero, structural_zero); }
//
// cusparseSafeCall(hipsparseDcsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
// cusparseSafeCall(hipsparseDcsrsv2_analysis(handle, HIPSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
//
// /*************************************/
// /* STEP 4: FACTORIZATION: A = L * L' */
// /*************************************/
// int numerical_zero;
//
// cusparseSafeCall(hipsparseDcsric02(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
// status = hipsparseXcsric02_zeroPivot(handle, info_A, &numerical_zero);
// /*if (HIPSPARSE_STATUS_ZERO_PIVOT == status){ printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero); }
// */
//
// gpuErrchk(hipMemcpy(h_A, d_A, nnz * sizeof(double), hipMemcpyDeviceToHost));
// /*printf("\nNon-zero elements in Cholesky matrix\n\n");
// for (int k = 0; k<10; k++) printf("%f\n", h_A[k]);*/
//
// cusparseSafeCall(hipsparseDcsr2dense(handle, Nrows, Ncols, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_A_dense, Nrows));
//
// /*printf("\nCholesky matrix\n\n");
// for (int i = 0; i < 10; i++) {
// std::cout << "[ ";
// for (int j = 0; j < 10; j++)
// std::cout << h_A_dense[i * Ncols + j] << " ";
// std::cout << "]\n";
// }*/
//
// /*********************/
// /* STEP 5: L * z = x */
// /*********************/
// // --- Allocating the intermediate result vector
// double *d_z; gpuErrchk(hipMalloc(&d_z, N * sizeof(double)));
//
// const double alpha = 1.;
// cusparseSafeCall(hipsparseDcsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, d_x, d_z, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
//
// /**********************/
// /* STEP 5: L' * y = z */
// /**********************/
// // --- Allocating the host and device side result vector
// double *h_y = (double *)malloc(Ncols * sizeof(double));
// double *d_y; gpuErrchk(hipMalloc(&d_y, Ncols * sizeof(double)));
//
// cusparseSafeCall(hipsparseDcsrsv2_solve(handle, HIPSPARSE_OPERATION_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, d_z, d_y, HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
//
// hipMemcpy(h_x, d_y, N * sizeof(double), hipMemcpyDeviceToHost);
// /*printf("\n\nFinal result\n");
// for (int k = 0; k<10; k++) printf("x[%i] = %f\n", k, h_x[k]);
// */
// for (int i = 0; i < numNodes; i++) {
// x[i] = x[i] + h_x[i * 2];
// y[i] = y[i] + h_x[i * 2 + 1];
// }
// hipFree(d_A_dense);
// hipFree(d_nnzPerVector);
// hipFree(d_A);
// hipFree(d_A_RowIndices);
// hipFree(d_A_ColIndices);
// hipFree(d_x);
// hipFree(pBuffer);
// hipFree(d_z);
// hipFree(d_y);
//
// free(h_nnzPerVector);
//
// free(h_A_dense);
//
// free(h_A);
// free(h_A_RowIndices);
// free(h_A_ColIndices);
// free(h_x);
// free(h_y);
//
// return 0;
//} | c2a5c78a3bbd39718826b8dbc7044d1b43a154b1.cu | #include <iostream>
#include "string"
#include "fstream"
#include "cudaFEM_read.cuh"
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include <iostream>
#include "fstream"
#include <cuda.h>
#include <cusolverSp.h>
#include "device_launch_parameters.h"
#include <cusolverDn.h>
#include <cusparse.h>
#include <vector>
#include <cassert>
#include "Utilities.cuh"
#include <ctime>
#include "cuda_functions.cuh"
#define max(a,b) ((a) > (b) ? (a) : (b))
#define IDX2C(i,j,ld) (((j)*(ld))+( i )) //first entry is columns and second entry is rows.
#define threeD21D(row_d,col_d,el_d,width_d,depth_d) (row_d+width_d*(col_d+depth_d*el_d))
#define nodesinelemX(node,el,nodesPerElem) (node + nodesPerElem*el)
#define nodesDisplacementX(dof,node,dimension) (dof + node*dimension)
Geometry::Geometry(){
	// Default state: CPU assembly path until the caller enables CUDA.
	cuda_use = false;
	std::cout << "Geometry Object created" << std::endl;
}
Geometry::~Geometry(){
	// Releases the host arrays built in read_nodes()/read_elem()/
	// initilizeMatrices() and the persistent device buffers.
	// BUG FIX: every allocation made with new[] must be released with
	// delete[] — the old code used plain `delete` on the inner arrays
	// (E[e][i], M[e][i], E[e], M[e], nodesInElem[e], displaceInElem[i],
	// K[i]), which is undefined behavior.
	// NOTE(review): several members (x_init, y_init, b_rhs, h_A_dense, L,
	// h_A_dense_double, nodesInElem_host, displaceInElem_host, ...) are never
	// released here — confirm ownership and add matching delete[]s if this
	// class owns them.
	std::cout << "Geometry Object deleted" << std::endl;
	//deleteing dynamic arrays
	delete[] x;
	delete[] y;
	delete[] z;
	for (int e = 0; e < numE; e++){
		for (int i = 0; i < numNodesPerElem*dim; i++){
			delete[] E[e][i];
			delete[] M[e][i];
		}
		delete[] E[e];
		delete[] M[e];
		delete[] nodesInElem[e];
	}
	for (int i = 0; i < numNodes; i++){
		delete[] displaceInElem[i];
	}
	for (int i = 0; i < numNodes*dim; i++) {
		delete[] K[i];
	}
	delete[] K;
	delete[] u;
	delete[] f;
	delete[] displaceInElem;
	delete[] E;
	delete[] M;
	delete[] nodesInElem;
	delete[] E_vector_host;
	delete[] elemForce;
	delete[] forceVec_x;
	delete[] forceVec_y;
	delete[] K_vector_form;
	// Device-side buffers allocated with cudaMalloc.
	cudaFree(d_A_dense);
	cudaFree(d_nnzPerVector);
	cudaFree(d_A);
	cudaFree(d_A_RowIndices);
	cudaFree(d_A_ColIndices);
	cudaFree(nodesInElem_device);
	cudaFree(d_x);
	cudaFree(d_y);
	cudaFree(d_z);
	cudaFree(E_vector_device);
	free(h_nnzPerVector); // allocated with malloc in initialize_CUDA
	//free(h_A_dense); // NOTE(review): h_A_dense is new[]-allocated; if ever freed here it must use delete[]
}
void Geometry::read_nodes(){
	// Reads node coordinates from "FEM_Nodes.txt" (first token: node count,
	// then one coordinate tuple per node) and allocates the coordinate and
	// RHS arrays sized from numNodes.
	std::ifstream in_matrix("FEM_Nodes.txt");
	if (!in_matrix){
		// NOTE(review): only warns — the reads below still execute on a bad
		// stream, leaving numNodes/coordinates unset. Consider early return.
		std::cout << "cannot open Nodes \n";
	}
	in_matrix >> numNodes;
	x = new double[numNodes];
	y = new double[numNodes];
	z = new double[numNodes];
	x_init = new double[numNodes];
	y_init = new double[numNodes];
	if (dim == 3){
		// 3D: read x, y, z per node.
		// NOTE(review): this branch leaves x_init/y_init uninitialized,
		// unlike the 2D branch — confirm whether 3D callers use them.
		for (int i = 0; i < numNodes; i++){
			in_matrix >> x[i] >> y[i] >> z[i];
		}
	}
	else if(dim ==2){
		// 2D: read x, y; remember initial positions and flatten z to 0.
		for (int i = 0; i < numNodes; i++){
			in_matrix >> x[i] >> y[i];
			x_init[i] = x[i];
			y_init[i] = y[i];
			z[i] = 0;
		}
	}
	in_matrix.close();
	//u = new double[numNodes*dim];
	// Right-hand-side vector for the GPU solve (one entry per DOF).
	b_rhs = new float[numNodes*dim];
}
void Geometry::read_elem(){
	// Reads element connectivity from "FEM_Elem.txt" and the node->DOF map
	// from "FEM_displacement.txt", allocates the per-element local matrices,
	// and mirrors the flattened connectivity/DOF tables onto the device.
	// Must run after read_nodes() so numNodes is known.
	//
	// BUG FIX: nodesInElem_device and displaceInElem_device are DEVICE
	// pointers; the old code first assigned each a host `new int[...]`
	// allocation that was immediately leaked when cudaMalloc overwrote the
	// pointer. Those host allocations are removed (also dropped unused
	// local `int a`).
	std::ifstream in_elem("FEM_Elem.txt");
	std::cout << "Reading in element files" << std::endl;
	if (!in_elem){
		std::cout << "cannot open Element file \n";
	}
	in_elem >> numE >> numNodesPerElem;
	//Allocating E matrix 3x3x3 matrix
	E = new double**[numE];
	M = new double**[numE];
	nodesInElem = new int*[numE];
	nodesInElem_host = new int[numE*numNodesPerElem];
	//Allocate a new vector for storing all of the stresses at an element
	global_stress_mises = new double[numE];
	cudaMalloc((void**)&nodesInElem_device, numE*numNodesPerElem*sizeof(int));
	// Per-element local stiffness (E) and mass (M) matrices, plus the
	// element's node list.
	for (int e = 0; e < numE; e++){
		E[e] = new double*[numNodesPerElem*dim];
		M[e] = new double*[numNodesPerElem*dim];
		nodesInElem[e] = new int[numNodesPerElem];
		for (int i = 0; i < numNodesPerElem*dim; i++){
			E[e][i] = new double[numNodesPerElem*dim];
			M[e][i] = new double[numNodesPerElem*dim];
		}
	}
	E_vector_host = new double[numE*numNodesPerElem*dim*numNodesPerElem*dim];
	cudaMalloc((void**)&E_vector_device, numE*numNodesPerElem*dim*numNodesPerElem*dim*sizeof(*E_vector_device));
	//Populating the nodesinelem matrix
	for (int e = 0; e < numE; e++) {
		for (int i = 0; i < numNodesPerElem; i++)
			in_elem >> nodesInElem[e][i];
	}
	in_elem.close();
	// Flatten the connectivity table for the device copy.
	for (int e = 0; e < numE; e++) {
		for (int i = 0; i < numNodesPerElem; i++){
			nodesInElem_host[nodesinelemX(i, e, numNodesPerElem)] = nodesInElem[e][i];
		}
	}
	cudaMemcpy(nodesInElem_device, nodesInElem_host, numE*numNodesPerElem*sizeof(int), cudaMemcpyHostToDevice);
	std::ifstream in_disp("FEM_displacement.txt");
	if (!in_disp){
		std::cout << "cannot open displacement file \n";
	}
	displaceInElem = new int*[numNodes];
	displaceInElem_host = new int[numNodes*dim];
	for (int i = 0; i < numNodes; i++){
		displaceInElem[i] = new int[3];
	}
	cudaMalloc((void**)&displaceInElem_device, numNodes*dim*sizeof(int));
	for (int i = 0; i < numNodes; i++){
		for (int j = 0; j < dim; j++){
			in_disp >> displaceInElem[i][j];
		}
	}
	// Flatten the node->DOF map for the device copy.
	for (int i = 0; i < numNodes; i++){
		for (int j = 0; j < dim; j++){
			displaceInElem_host[nodesDisplacementX(j, i, dim)] = displaceInElem[i][j];
		}
	}
	cudaMemcpy(displaceInElem_device, displaceInElem_host, numNodes*dim*sizeof(int), cudaMemcpyHostToDevice);
	in_disp.close();
}
void Geometry::read_force(){
	// Loads the force boundary conditions from "FEM_force.txt":
	// first the BC count, then per BC an element id, a local edge
	// coordinate, and the (x, y) force components.
	std::ifstream force_file("FEM_force.txt");
	if (!force_file){
		std::cout << "cannot open force file \n";
	}
	else{
		force_file >> numForceBC;
		elemForce = new int[numForceBC];
		localcoordForce = new int[numForceBC];
		forceVec_x = new double[numForceBC];
		forceVec_y = new double[numForceBC];
		int idx = 0;
		while (idx < numForceBC){
			force_file >> elemForce[idx] >> localcoordForce[idx] >> forceVec_x[idx] >> forceVec_y[idx];
			++idx;
		}
	}
	force_file.close();
}
void Geometry::initilizeMatrices(){
	// Allocates the host and device work buffers sized from numNodes and dim.
	// Must run after read_nodes() so numNodes is known.
	//
	// BUG FIX: new[] takes an ELEMENT COUNT, not a byte count. The old code
	// multiplied the count by sizeof(element) (malloc-style), over-allocating
	// every host buffer 4-8x. The cudaMalloc calls below correctly take bytes
	// and are unchanged. (Also removed a dead empty loop left from the
	// commented-out B matrix.)
#if 0
	cudaMalloc((void**)&d_x, numNodes*sizeof(double));
	cudaMalloc((void**)&d_y, numNodes*sizeof(double));
	cudaMalloc((void**)&d_z, numNodes*sizeof(double));
#endif // 0
	cudaMalloc((void**)&d_x_dist, numNodes*sizeof(*d_x_dist));
	cudaMalloc((void**)&d_y_dist, numNodes*sizeof(*d_x_dist));
	cudaMalloc((void**)&d_z_dist, numNodes*sizeof(*d_x_dist));
	K = new double*[numNodes*dim];
	h_A_dense = new float[numNodes*dim*numNodes*dim];
	h_M_dense = new double[numNodes*dim*numNodes*dim];
	L = new float[numNodes*dim*numNodes*dim];
	h_A_dense_double = new double[numNodes*dim*numNodes*dim];
	gpuErrchk(cudaMalloc((void**)&d_A_dense, numNodes*dim*numNodes*dim* sizeof(*d_A_dense)));
	gpuErrchk(cudaMalloc((void**)&device_L, numNodes*dim*numNodes*dim* sizeof(*device_L)));
	gpuErrchk(cudaMalloc((void**)&d_A_dense_double, numNodes*dim*numNodes*dim* sizeof(*d_A_dense_double)));
	// Rows of the global stiffness matrix K.
	for (int i = 0; i < numNodes*dim; i++) {
		K[i] = new double[numNodes*dim];
	}
	// Displacement, force and Newmark time-integration vectors (one entry
	// per DOF).
	u = new double[numNodes*dim];
	f = new double[numNodes*dim];
	u_dot = new double[numNodes*dim];
	u_doubledot = new double[numNodes*dim];
	u_doubledot_old = new double[numNodes*dim];
	for (int i = 0; i < numNodes*dim; i++){
		f[i] = 0; // force vector starts cleared
	}
}
void Geometry::make_K_matrix(){
	// Assembles the global stiffness matrix, either on the GPU
	// (Linear*Barycentric_B_CUDA_host) or on the CPU (per-element local
	// matrices + AssembleGlobalElementMatrixBarycentric), then applies the
	// pseudo-force boundary loads to the global force vector f.
	std::clock_t start_K_local1;
	std::clock_t start_K_global;
	start_K_local1 = std::clock();
	// NOTE(review): this local shadows the `cuda_use` member read elsewhere
	// (e.g. in tt()) — confirm get_cuda_use() returns the same value.
	bool cuda_use = get_cuda_use();
	//bool cuda_use = true;
	std::clock_t start_K_actual;
	start_K_actual = std::clock();
	start_K_global = std::clock();
	if (cuda_use){
		// GPU path: local assembly (and global scatter) done in CUDA kernels.
		if (dim == 2){
			Linear2DBarycentric_B_CUDA_host();
		}
		else if (dim == 3){
			Linear3DBarycentric_B_CUDA_host();
		}
	}
	else{
		// CPU path: build each element's local stiffness/mass matrix.
		for (int e = 0; e < numE; e++) {
			//cout << Linear2DJacobianDet_Barycentric(nodesInElem[e], x, y) << endl;
			if (dim == 2){
				AssembleLocalElementMatrixBarycentric2D(e,nodesInElem[e],displaceInElem, x, y, dim, E[e],M[e], Poisson, Young, thickness);
			}
			else if (dim == 3){
				AssembleLocalElementMatrixBarycentric3D(nodesInElem[e], x, y, z, dim, E[e], Poisson, Young, thickness);
			}
		}
	}
	double duration_K_local = (std::clock() - start_K_local1) / (double)CLOCKS_PER_SEC;
	// CPU path only: scatter the local matrices into the global dense matrix.
	if (!cuda_use)
		AssembleGlobalElementMatrixBarycentric(numNodes*dim, numE, numNodesPerElem, nodesInElem, E, M, h_A_dense, h_M_dense, displaceInElem);
	double duration_K_global = (std::clock() - start_K_global) / (double)CLOCKS_PER_SEC;
	//std::cout << " CUDA K ASSEMBLE: " << duration_K_global << std::endl;
	//ApplyEssentialBoundaryConditionsBarycentric(numNodes*dim, numForceBC, localcoordForce, elemForce, forceVec_x, forceVec_y, f, K, nodesInElem, thickness, x, y, displaceInElem);
	//ApplyEssentialBoundaryConditionsBarycentric(numNodes*dim, sudo_node_force, localcoordForce, elemForce, sudo_force_x, sudo_force_y, f, K, nodesInElem, thickness, x, y, displaceInElem);
	// Rebuild f from the stored pseudo-force nodes (writes the member f).
	ApplySudoForcesBarycentric(numNodes*dim, sudo_node_force, localcoordForce, elemForce, sudo_force_x, sudo_force_y, f, nodesInElem, thickness, x, y, displaceInElem);
	/*for (int i = 0; i < numNodes*dim; i++){
		std::cout << f[i] << std::endl;
	}*/
	//std::cout << "FPS time local K matrix: " << duration_K_local << std::endl;
	//std::cout << "FPS time global K matrix: " << duration_K_global << std::endl;
	//std::cout << "sudo force x: " << sudo_force_x << " sudo_force y: " << sudo_force_y << std::endl;
}
//void Geometry::call_sudo_force_func(void){
//
// //call this to apply the sudo forces
// //ApplySudoForcesBarycentric(numNodes*dim, sudo_node_force, localcoordForce, elemForce, sudo_force_x, sudo_force_y, f, nodesInElem, thickness, x, y, displaceInElem, force_reset);
//
//}
void Geometry::AssembleGlobalElementMatrixBarycentric(int numP, int numE, int nodesPerElem, int **elem, double ***E, double ***M, float *K, double *global_M, int **displaceInElem){
    // Scatter-adds every element's local stiffness E[k] and mass M[k] into the
    // global dense matrices, and forms the Newmark effective matrix
    //     L = (dt*c_xi*beta_1 + dt^2*beta_2/2) * K + (1 + dt*beta_1*c_alpha) * M
    // entry-by-entry as the accumulation proceeds (the last element touching an
    // entry sees the fully accumulated K/M values for it, so the final L is
    // consistent).
    //
    // numP          - total number of global DOFs (numNodes*dim)
    // numE          - number of elements (shadows the member of the same name)
    // elem          - elem[k][npe] = global node index of local node npe in element k
    // E, M          - per-element local stiffness / mass matrices
    // K, global_M   - global dense output matrices (column-major via IDX2C)
    // displaceInElem- node -> global DOF index map
    //
    // Fix: the DOF scratch array was leaked on every call (new[] without delete[]).

    // Zero the global matrices (L is a class member of the same layout).
    for (int j = 0; j < numP; j++){
        for (int i = 0; i < numP; i++){
            K[IDX2C(j, i, numP)] = 0;
            L[IDX2C(j, i, numP)] = 0;
            global_M[IDX2C(j, i, numP)] = 0;
        }
    }

    // Scratch map: local element DOF -> global DOF, rebuilt per element.
    int *DOF = new int[numNodes*dim];
    for (int k = 0; k < numE; k++){
        int counter = 0;
        for (int npe = 0; npe < numNodesPerElem; npe++){
            const int node = elem[k][npe];
            for (int dof = 0; dof < dim; dof++){
                DOF[counter] = displaceInElem[node][dof];
                counter++;
            }
        }
        for (int c = 0; c < numNodesPerElem*dim; c++){
            for (int r = 0; r < numNodesPerElem*dim; r++){
                K[IDX2C(DOF[c], DOF[r], numP)] = K[IDX2C(DOF[c], DOF[r], numP)] + E[k][r][c];
                global_M[IDX2C(DOF[c], DOF[r], numP)] = global_M[IDX2C(DOF[c], DOF[r], numP)] + M[k][r][c];
                L[IDX2C(DOF[c], DOF[r], numP)] = (dt*c_xi*beta_1+dt*dt*beta_2 / 2.0)*K[IDX2C(DOF[c], DOF[r], numP)] + (1+dt*beta_1*c_alpha)*global_M[IDX2C(DOF[c], DOF[r], numP)];
            }
        }
    }
    delete[] DOF;  // was leaked on every assembly call
}
void Geometry::find_b(){
    // Builds the Newmark right-hand side:
    //   b = f - K * d_pred - (c_xi*K + c_alpha*M) * v_pred
    // where d_pred / v_pred are the standard displacement / velocity predictors
    // formed from the current state (u, u_dot, u_doubledot).
    const int du = numNodes*dim;
    for (int i = 0; i < du; i++){
        double accum = 0;
        for (int j = 0; j < du; j++){
            // Displacement predictor: u + dt*v + dt^2/2*(1-beta_2)*a
            const double disp_pred = u[j] + dt*u_dot[j] + (dt*dt / 2.0)*(1.0 - beta_2)*u_doubledot[j];
            // Velocity predictor: v + dt*(1-beta_1)*a
            const double vel_pred = u_dot[j] + dt*(1.0 - beta_1)*u_doubledot[j];
            accum += h_A_dense[IDX2C(i, j, du)] * disp_pred
                   + (h_A_dense[IDX2C(i, j, du)] * c_xi + h_M_dense[IDX2C(i, j, du)]*c_alpha) * vel_pred;
        }
        b_rhs[i] = f[i] - accum;
        // Debug trace: flags any DOF still carrying a positive external force.
        if (f[i]>0){
            std::cout << "f" << std::endl;
        }
    }
}
//initializing the dynamic array
void Geometry::initialize_zerovector(int numberofpoints){
    // Allocate storage for the indices of the pinned (zero-displacement) nodes
    // and remember how many there are.
    vector_zero_nodes = new int[numberofpoints];
    numNodesZero = numberofpoints;
}
void Geometry::initialize_dynamic(){
    // Reset the full dynamic state — displacement increment, velocity,
    // acceleration and previous-step acceleration — to zero for every DOF.
    const int nDof = numNodes * dim;
    for (int i = 0; i < nDof; i++){
        u[i] = 0.0;
        u_dot[i] = 0.0;
        u_doubledot[i] = 0.0;
        u_doubledot_old[i] = 0.0;
    }
}
void Geometry::update_dynamic_vectors(){
    // Newmark corrector: from the old and new accelerations, compute this
    // step's displacement increment (u holds an increment, not a total) and
    // advance the velocity in place.
    for (int i = 0; i < numNodes*dim; i++){
        const double a_old = u_doubledot_old[i];
        const double a_new = u_doubledot[i];
        // Order matters: u must be formed from the pre-update velocity.
        u[i] = dt*u_dot[i] + (dt*dt / 2.0)*((1 - beta_2)*a_old + beta_2*a_new);
        u_dot[i] = u_dot[i] + dt*((1 - beta_1)*a_old + beta_1*a_new);
    }
}
void Geometry::update_dynamic_xyz(){
    // Enforce the pinned-node constraint (zero displacement increment at every
    // node listed in vector_zero_nodes), then advance the nodal coordinates by
    // this step's displacement increment.
    for (int k = 0; k < numNodesZero; k++){
        const int pinned = vector_zero_nodes[k];
        u[displaceInElem[pinned][0]] = 0;
        u[displaceInElem[pinned][1]] = 0;
    }
    for (int node = 0; node < numNodes; node++) {
        x[node] = x[node] + u[node * dim];
        y[node] = y[node] + u[node * dim + 1];
    }
}
void Geometry::update_vector(){ //solve Ax=b for the dynamics case
    // Solves L * a_new = b for the new accelerations of the Newmark step:
    //   1. Upload the dense effective matrix L and convert it to CSR (cuSPARSE).
    //   2. Incomplete-Cholesky factorize (csric02): A ~= Lc * Lc'.
    //   3. Two triangular solves: Lc*z = b, then Lc'*a = z.
    //   4. Copy the solution back into u_doubledot (saving the old values).
    //
    // Fixes vs. previous revision:
    //   - h_x was malloc'ed but its free() was commented out (host leak per step).
    //   - d_A / d_A_ColIndices are cudaMalloc'ed every call (nnz can change) but
    //     were never freed (device leak per step); they are freed here since they
    //     are fully re-created at the top of every call.
    //   - the final cudaMemcpy was unchecked.
    this->set_zero_AxB();

    // --- Upload the dense effective matrix and convert it to CSR on the device.
    gpuErrchk(cudaMemcpy(d_A_dense, L, Nrows * Ncols * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
    // Number of nonzeros per row (device), then total nnz.
    cusparseSafeCall(cusparseSnnz(handle, CUSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, &nnz));
    gpuErrchk(cudaMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), cudaMemcpyDeviceToHost));
    // CSR value/column arrays sized by this call's nnz (freed at the bottom).
    gpuErrchk(cudaMalloc(&d_A, nnz * sizeof(*d_A)));
    gpuErrchk(cudaMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)));
    cusparseSafeCall(cusparseSdense2csr(handle, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices));

    // --- Host-side CSR mirror (used only for inspection/debugging).
    float *h_A = (float *)malloc(nnz * sizeof(*h_A));
    int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices));
    int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices));
    gpuErrchk(cudaMemcpy(h_A, d_A, nnz*sizeof(*h_A), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost));
    std::cout << nnz << std::endl;

    // --- RHS vector b (float copy of b_rhs), uploaded to the device.
    float *h_x = (float *)malloc(Nrows * sizeof(float));
    for (int i = 0; i < N; i++){
        h_x[i] = b_rhs[i];
    }
    float *d_x; gpuErrchk(cudaMalloc(&d_x, Nrows * sizeof(float)));
    gpuErrchk(cudaMemcpy(d_x, h_x, Nrows * sizeof(float), cudaMemcpyHostToDevice));

    /********************************************************************************************************/
    /* STEP 2: QUERY HOW MUCH MEMORY USED IN CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
    /********************************************************************************************************/
    int pBufferSize_M, pBufferSize_L, pBufferSize_Lt;
    cusparseSafeCall(cusparseScsric02_bufferSize(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, &pBufferSize_M));
    cusparseSafeCall(cusparseScsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, &pBufferSize_L));
    cusparseSafeCall(cusparseScsrsv2_bufferSize(handle, CUSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, &pBufferSize_Lt));
    int pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_Lt));
    void *pBuffer = 0; gpuErrchk(cudaMalloc((void**)&pBuffer, pBufferSize));

    /******************************************************************************************************/
    /* STEP 3: ANALYZE THE THREE PROBLEMS: CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
    /******************************************************************************************************/
    int structural_zero;
    cusparseSafeCall(cusparseScsric02_analysis(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
    cusparseStatus_t status = cusparseXcsric02_zeroPivot(handle, info_A, &structural_zero);
    if (CUSPARSE_STATUS_ZERO_PIVOT == status){ printf("A(%d,%d) is missing\n", structural_zero, structural_zero); }
    cusparseSafeCall(cusparseScsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
    cusparseSafeCall(cusparseScsrsv2_analysis(handle, CUSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));

    /*************************************/
    /* STEP 4: FACTORIZATION: A = L * L' */
    /*************************************/
    int numerical_zero;
    cusparseSafeCall(cusparseScsric02(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
    status = cusparseXcsric02_zeroPivot(handle, info_A, &numerical_zero);
    // Note: a numerical zero pivot means the incomplete factorization broke down;
    // the original code ignored it, so we only record it here.
    gpuErrchk(cudaMemcpy(h_A, d_A, nnz * sizeof(float), cudaMemcpyDeviceToHost));
    // Write the factor back to the dense buffer (kept for downstream/debug use).
    cusparseSafeCall(cusparseScsr2dense(handle, Nrows, Ncols, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_A_dense, Nrows));

    /*********************/
    /* STEP 5: L * z = x */
    /*********************/
    float *d_z; gpuErrchk(cudaMalloc(&d_z, N * sizeof(float)));
    const float alpha = 1.;
    cusparseSafeCall(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, d_x, d_z, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));

    /**********************/
    /* STEP 6: L' * y = z */
    /**********************/
    float *h_y = (float *)malloc(Ncols * sizeof(float));
    float *d_y; gpuErrchk(cudaMalloc(&d_y, Ncols * sizeof(float)));
    cusparseSafeCall(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, d_z, d_y, CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
    gpuErrchk(cudaMemcpy(h_x, d_y, N * sizeof(float), cudaMemcpyDeviceToHost));
    printf("\n\nFinal result\n");

    // Commit the solution: shift current accelerations to "old", store new ones.
    for (int i = 0; i < numNodes*dim; i++) {
        u_doubledot_old[i] = u_doubledot[i];
        u_doubledot[i] = h_x[i];
    }

    // --- Cleanup (host and device temporaries created by this call).
    free(h_A);
    free(h_A_RowIndices);
    free(h_A_ColIndices);
    free(h_x);                 // fix: previously leaked every call
    free(h_y);
    cudaFree(d_x);
    cudaFree(pBuffer);
    cudaFree(d_z);
    cudaFree(d_y);
    cudaFree(d_A);             // fix: re-allocated each call, was never freed
    cudaFree(d_A_ColIndices);  // fix: re-allocated each call, was never freed
}
void Geometry::Linear2DBarycentric_B(int *nodes, double *x, double *y, double **term){
    // Fills term (3x6) with the strain-displacement matrix B of a linear
    // (constant-strain) triangle, scaled by 1/detJ:
    //   row 0: d/dx terms (even columns), row 1: d/dy terms (odd columns),
    //   row 2: the mixed shear row.
    const double detJ = Linear2DJacobianDet_Barycentric(nodes, x, y);

    // Signed coordinate differences of the triangle's vertices.
    const double y23 = y[nodes[1]] - y[nodes[2]];
    const double y31 = y[nodes[2]] - y[nodes[0]];
    const double y12 = y[nodes[0]] - y[nodes[1]];
    const double x32 = x[nodes[2]] - x[nodes[1]];
    const double x13 = x[nodes[0]] - x[nodes[2]];
    const double x21 = x[nodes[1]] - x[nodes[0]];

    // Start from an all-zero 3x6 matrix.
    for (int r = 0; r < 3; r++){
        for (int c = 0; c < 6; c++){
            term[r][c] = 0;
        }
    }

    // Populate the nonzero pattern of the CST B-matrix.
    term[0][0] = term[2][1] = y23 / (detJ);
    term[0][2] = term[2][3] = y31 / (detJ);
    term[0][4] = term[2][5] = y12 / (detJ);
    term[1][1] = term[2][0] = x32 / (detJ);
    term[1][3] = term[2][2] = x13 / (detJ);
    term[1][5] = term[2][4] = x21 / (detJ);
}
void Geometry::Linear3DBarycentric_B(int *nodes, double *x, double *y, double *z, double **term){
    // Fills term (6x12) with the strain-displacement matrix B of a linear
    // tetrahedron. The columns come in 3-wide groups, one group per node
    // (x,y,z DOFs); rows 0-2 are the normal strains, rows 3-5 the shears.
    // Edge vectors from node 3 (local numbering) to nodes 0, 1, 2.
    double x14 = x[nodes[0]] - x[nodes[3]];
    double x24 = x[nodes[1]] - x[nodes[3]];
    double x34 = x[nodes[2]] - x[nodes[3]];
    double y14 = y[nodes[0]] - y[nodes[3]];
    double y24 = y[nodes[1]] - y[nodes[3]];
    double y34 = y[nodes[2]] - y[nodes[3]];
    double z14 = z[nodes[0]] - z[nodes[3]];
    double z24 = z[nodes[1]] - z[nodes[3]];
    double z34 = z[nodes[2]] - z[nodes[3]];
    // Jacobian determinant (scalar triple product of the edge vectors).
    double J = x14*(y24*z34 - y34*z24) - y14*(x24*z34 - z24 * x34) + z14*(x24*y34 - y24*x34);
    // Entries of the inverse Jacobian (cofactors divided by J).
    double J_bar11 = (y24*z34 - z24*y34) / J;
    double J_bar12 = (z14*y34 - y14*z34) / J;
    double J_bar13 = (y14*z24 - z14*y24) / J;
    double J_bar21 = (z24*x34 - x24*z34) / J;
    double J_bar22 = (x14*z34 - z14*x34) / J;
    double J_bar23 = (z14*x24 - x14*z24) / J;
    double J_bar31 = (x24*y34 - y24*x34) / J;
    double J_bar32 = (y14*x34 - x14*y34) / J;
    double J_bar33 = (x14*y24 - y14*x24) / J;
    // Row sums with flipped sign: shape-function derivatives of the 4th node
    // (barycentric coordinates sum to 1, so its gradient is minus the others').
    double J_star1 = -(J_bar11 + J_bar12 + J_bar13);
    double J_star2 = -(J_bar21 + J_bar22 + J_bar23);
    double J_star3 = -(J_bar31 + J_bar32 + J_bar33);
    // Start from an all-zero 6x12 matrix.
    for (int row = 0; row < 6; row++){
        for (int col = 0; col < 12; col++){
            term[row][col] = 0;
        }
    }
    // Node 0 columns (0-2).
    term[0][0] = term[3][1] = term[5][2] = J_bar11;
    term[1][1] = term[3][0] = term[4][2] = J_bar21;
    term[2][2] = term[5][0] = term[4][1] = J_bar31;
    // Node 1 columns (3-5).
    term[0][3] = term[3][4] = term[5][5] = J_bar12;
    term[1][4] = term[3][3] = term[4][5] = J_bar22;
    term[2][5] = term[4][4] = term[5][3] = J_bar32;
    // Node 2 columns (6-8).
    term[0][6] = term[3][7] = term[5][8] = J_bar13;
    term[1][7] = term[3][6] = term[4][8] = J_bar23;
    term[2][8] = term[4][7] = term[5][6] = J_bar33;
    // Node 3 columns (9-11) use the negated row sums.
    term[0][9] = term[3][10] = term[5][11] = J_star1;
    term[1][10] = term[3][9] = term[4][11] = J_star2;
    term[2][11] = term[4][10] = term[5][9] = J_star3;
}
double Geometry::Linear2DJacobianDet_Barycentric(int *nodes, double *x, double *y){
    // Jacobian determinant of a linear triangle (= twice its signed area),
    // formed from the two edge vectors emanating from node 2.
    const double ex1 = x[nodes[0]] - x[nodes[2]];
    const double ex2 = x[nodes[1]] - x[nodes[2]];
    const double ey1 = y[nodes[0]] - y[nodes[2]];
    const double ey2 = y[nodes[1]] - y[nodes[2]];
    return ex1*ey2 - ey1*ex2;
}
double Geometry::Linear3DJacobianDet_Barycentric(int *nodes, double *x, double *y, double *z){
    // Jacobian determinant of a linear tetrahedron (= six times its signed
    // volume): the scalar triple product of the edge vectors from node 3.
    const double ax = x[nodes[0]] - x[nodes[3]];
    const double bx = x[nodes[1]] - x[nodes[3]];
    const double cx = x[nodes[2]] - x[nodes[3]];
    const double ay = y[nodes[0]] - y[nodes[3]];
    const double by = y[nodes[1]] - y[nodes[3]];
    const double cy = y[nodes[2]] - y[nodes[3]];
    const double az = z[nodes[0]] - z[nodes[3]];
    const double bz = z[nodes[1]] - z[nodes[3]];
    const double cz = z[nodes[2]] - z[nodes[3]];
    return ax*(by*cz - cy*bz) - ay*(bx*cz - bz*cx) + az*(bx*cy - by*cx);
}
void Geometry::Linear2DBarycentric_D(double **term, double nu, double youngE){
    // Fills term (3x3) with the plane-stress constitutive matrix, WITHOUT the
    // leading E/(1-nu^2) factor: that scale is applied later, after B^T*D*B,
    // to limit floating-point error growth. (youngE is therefore unused here.)
    // A plane-strain variant existed behind a disabled preprocessor switch.
    for (int r = 0; r < 3; r++){
        for (int c = 0; c < 3; c++){
            term[r][c] = 0;
        }
    }
    term[0][0] = 1.0;
    term[1][1] = 1.0;
    term[0][1] = nu;
    term[1][0] = nu;
    term[2][2] = (1 - nu) / 2.0;
}
void Geometry::Linear3DBarycentric_D(double **term, double nu, double youngE){
    // Fills term (6x6) with the isotropic 3D elasticity matrix, scaled by
    // E/((1-2nu)(1+nu)):
    //   diag(0..2)     = 1-nu       (normal terms)
    //   off-diag(0..2) = nu
    //   diag(3..5)     = (1-2nu)/2  (shear terms)
    //
    // Fix: the shear diagonal previously used (1-nu)/2, which is the 2D
    // plane-formulation value; the correct 3D isotropic entry is (1-2nu)/2
    // (i.e. the shear modulus G = E/(2(1+nu)) after the global scale).
    const int n = 6;
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            term[i][j] = 0;
        }
    }
    term[0][0] = term[1][1] = term[2][2] = (1.0 - nu);
    term[0][1] = term[1][0] = term[0][2] = term[2][0] = term[1][2] = term[2][1] = nu;
    term[3][3] = term[4][4] = term[5][5] = (1.0 - 2.0 * nu) / 2.0;
    // Apply the common scale factor to every entry.
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            term[i][j] = (youngE / ((1 - 2 * nu)*(1 + nu)))*term[i][j];
        }
    }
}
void Geometry::AssembleLocalElementMatrixBarycentric2D(int elem_n,int *nodes,int **displaceinE, double *x, double *y, int dimension, double **E,double **M, double nu, double youngE, double thickness)
{
    // Builds, for one linear (constant-strain) triangle:
    //   E (6x6): local stiffness  = (E/(1-nu^2)) * t * (J/2) * B^T D B
    //   M (6x6): local mass matrix (dynamic runs only)
    //   global_stress_mises[elem_n]: von Mises stress recovered from the
    //   current displacement increment u (dynamic runs only).
    //
    // Fixes vs. previous revision:
    //   - arrays allocated with new[] were released with scalar delete (UB);
    //     all releases now use delete[].
    //   - dead locals removed, including reads of z[0..2] that served no
    //     purpose on this 2D path.
    const int n = 3;  // strain components / nodes per triangle

    // Scratch matrices: B (3x6), D (3x3), B^T*D (6x3), B^T*D*B (6x6), D*B (3x6).
    double **B = new double*[n];
    double **D = new double*[n];
    double **B_TXD = new double*[n * 2];
    double **integrand = new double*[n * 2];
    double **DB = new double*[n];
    double *stress = new double[n];
    for (int i = 0; i < n; i++){
        B[i] = new double[n * 2];
        D[i] = new double[n];
        DB[i] = new double[n * 2];
        stress[i] = 0;  // stress accumulator starts at zero
    }
    for (int i = 0; i <n * 2; i++){
        B_TXD[i] = new double[n];
        integrand[i] = new double[n * 2];
    }

    double J = Linear2DJacobianDet_Barycentric(nodes, x, y);

    // Zero the accumulators.
    for (int row = 0; row < n * 2; row++){
        for (int col = 0; col < n; col++){
            B_TXD[row][col] = 0;
        }
    }
    for (int row = 0; row < n; row++){
        for (int col = 0; col < n * 2; col++){
            DB[row][col] = 0;
        }
    }
    for (int row = 0; row < n * 2; row++){
        for (int col = 0; col < n * 2; col++){
            integrand[row][col] = 0;
        }
    }

    // B and (unscaled) D for this element.
    Linear2DBarycentric_B(nodes, x, y, B);
    Linear2DBarycentric_D(D, nu, youngE);

    // B^T * D  (6x3)
    for (int row = 0; row < n * 2; row++){
        for (int col = 0; col < n; col++){
            for (int k = 0; k < n; k++){
                B_TXD[row][col] = B_TXD[row][col] + B[k][row] * D[k][col];
            }
        }
    }
    // (B^T * D) * B  (6x6)
    for (int row = 0; row < n * 2; row++){
        for (int col = 0; col < n * 2; col++){
            for (int k = 0; k < n; k++){
                integrand[row][col] = integrand[row][col] + B_TXD[row][k] * B[k][col];
            }
        }
    }

    if (get_dynamic()){
        // Stress recovery: sigma = (scale) * D * B * u_e, then von Mises.
        for (int row = 0; row < n; row++){
            for (int col = 0; col < n * 2; col++){
                for (int k = 0; k < n; k++){
                    DB[row][col] = DB[row][col] + D[row][k] * B[k][col];
                }
            }
        }
        for (int row = 0; row < n; row++){
            for (int col = 0; col < n; col++){
                for (int k = 0; k < 2; k++){
                    stress[row] = stress[row] + DB[row][col * 2 + k] * u[displaceinE[nodes[col]][k]];
                }
            }
            // Plane-stress scale; the plane-strain factor would be
            // E/((1+nu)(1-2nu)) instead.
            stress[row] = stress[row] * (youngE / ((1.0 - nu*nu)))*thickness*(J / 2.0);
        }
        // Plane von Mises: sqrt((sx+sy)^2 - 3(sx*sy - txy^2)).
        global_stress_mises[elem_n] = sqrt((stress[0] + stress[1])*(stress[0] + stress[1]) - 3 * (stress[0] * stress[1] - stress[2] * stress[2]));
    }

    // Scale the integrand into the local stiffness: constant integrand over a
    // triangle of area J/2, thickness t, elastic factor E/(1-nu^2).
    for (int row = 0; row < n * 2; row++){
        for (int col = 0; col < n * 2; col++){
            E[row][col] = (youngE / ((1.0 - nu*nu)))*thickness*(J / 2.0) * integrand[row][col];
        }
    }

    double A = J / 2;          // triangle area
    double rho = 1000.0;       // NOTE(review): density hard-coded here, not taken from the member — confirm intended
    if (get_dynamic()){
        // Local mass matrix (values preserved exactly from the original).
        M[0][0] = 2 * A*rho*thickness / 3.0;
        M[0][1] = 0.0;
        M[0][2] = A*rho*thickness / 2.0;
        M[0][3] = 0.0;
        M[0][4] = -A*rho*thickness / 6.0;
        M[0][5] = 0.0;
        M[1][0] = 0.0;
        M[1][1] = 2 * A*rho*thickness / 3.0;
        M[1][2] = 0.0;
        M[1][3] = A*rho*thickness / 2.0;
        M[1][4] = 0.0;
        M[1][5] = -A*rho*thickness / 6.0;
        M[2][0] = A*rho*thickness / 2.0;
        M[2][1] = 0.0;
        M[2][2] = 2 * A*rho*thickness / 3.0;
        M[2][3] = 0.0;
        M[2][4] = -A*rho*thickness / 6.0;
        M[2][5] = 0.0;
        M[3][0] = 0.0;
        M[3][1] = A*rho*thickness / 2.0;
        M[3][2] = 0.0;
        M[3][3] = 2.0* A*rho*thickness / 3.0;
        M[3][4] = 0.0;
        M[3][5] = -A*rho*thickness / 6.0;
        M[4][0] = -A*rho*thickness / 6.0;
        M[4][1] = 0.0;
        M[4][2] = -A*rho*thickness / 6.0;
        M[4][3] = 0.0;
        M[4][4] = A*rho*thickness / 3.0;
        M[4][5] = 0.0;
        M[5][0] = 0.0;
        M[5][1] = -A*rho*thickness / 6.0;
        M[5][2] = 0.0;
        M[5][3] = -A*rho*thickness / 6.0;
        M[5][4] = 0.0;
        M[5][5] = A*rho*thickness / 3.0;
    }

    // Cleanup — arrays allocated with new[] must be released with delete[].
    for (int i = 0; i < n; i++){
        delete[] B[i];
        delete[] D[i];
        delete[] DB[i];
    }
    for (int i = 0; i < n * 2; i++){
        delete[] B_TXD[i];
        delete[] integrand[i];
    }
    delete[] B;
    delete[] D;
    delete[] B_TXD;
    delete[] integrand;
    delete[] DB;
    delete[] stress;
}
//**************************3D************************************//
//3333333333333333333333333333333333333333333333333333333333333333//
//DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD//
//****************************************************************//
void Geometry::AssembleLocalElementMatrixBarycentric3D(int *nodes, double *x, double *y, double *z, int dimension, double **E, double nu, double youngE, double thickness)
{
    // Builds the 12x12 local stiffness E = (J/6) * B^T D B for one linear
    // tetrahedron (J/6 = element volume; D already carries its elastic scale).
    // The `thickness` parameter is unused on the 3D path but kept for a
    // signature parallel to the 2D assembler.
    //
    // Fix: arrays allocated with new[] were released with scalar delete (UB);
    // all releases now use delete[].
    const int multi = 2;

    // Scratch: B (6x12), D (6x6), B^T*D (12x6), B^T*D*B (12x12).
    double **B = new double*[3 * multi];
    double **D = new double*[3 * multi];
    double **B_TXD = new double*[6 * multi];
    double **integrand = new double*[6 * multi];
    for (int i = 0; i < 3 * multi; i++){
        B[i] = new double[6 * multi];
        D[i] = new double[3 * multi];
    }
    for (int i = 0; i < 6 * multi; i++){
        B_TXD[i] = new double[3 * multi];
        integrand[i] = new double[6 * multi];
    }

    double J = Linear3DJacobianDet_Barycentric(nodes, x, y, z);

    // Zero the accumulators.
    for (int row = 0; row < 6 * multi; row++){
        for (int col = 0; col < 3 * multi; col++){
            B_TXD[row][col] = 0;
        }
    }
    for (int row = 0; row < 6 * multi; row++){
        for (int col = 0; col < 6 * multi; col++){
            integrand[row][col] = 0;
        }
    }

    // B and D for this element.
    Linear3DBarycentric_B(nodes, x, y, z, B);
    Linear3DBarycentric_D(D, nu, youngE);

    // B^T * D  (12x6)
    for (int row = 0; row < 6 * multi; row++){
        for (int col = 0; col < 3 * multi; col++){
            for (int k = 0; k < 3 * multi; k++){
                B_TXD[row][col] = B_TXD[row][col] + B[k][row] * D[k][col];
            }
        }
    }
    // (B^T * D) * B  (12x12)
    for (int row = 0; row < 6 * multi; row++){
        for (int col = 0; col < 6 * multi; col++){
            for (int k = 0; k < 3 * multi; k++){
                integrand[row][col] = integrand[row][col] + B_TXD[row][k] * B[k][col];
            }
        }
    }

    // Constant integrand over the tetrahedron: multiply by volume J/6.
    for (int row = 0; row < 6 * multi; row++){
        for (int col = 0; col < 6 * multi; col++){
            E[row][col] = integrand[row][col] * J / 6.0;
        }
    }

    // Cleanup — new[] pairs with delete[].
    for (int i = 0; i < 3 * multi; i++){
        delete[] B[i];
        delete[] D[i];
    }
    for (int i = 0; i < 6 * multi; i++){
        delete[] B_TXD[i];
        delete[] integrand[i];
    }
    delete[] B;
    delete[] D;
    delete[] B_TXD;
    delete[] integrand;
}
//3D
//3D
void Geometry::Linear3DBarycentric_B_CUDA_host(){
    // Host wrapper for 3D global-stiffness assembly on the GPU: uploads nodal
    // coordinates, zeroes the global dense matrix d_A_dense and launches the
    // make_K_cuda3d kernel, which scatters element contributions directly into
    // d_A_dense. The result is intentionally left on the device (the copy back
    // to h_A_dense was disabled in the original code).
    //
    // Fixes vs. previous revision:
    //   - CUDA API calls and the kernel launch were unchecked; errors were
    //     silently dropped. All calls now go through gpuErrchk, and the launch
    //     is followed by cudaGetLastError().
    //   - removed a timing block that printed "cuda k assmeble" while timing
    //     nothing (the memcpy it measured was commented out), plus dead locals.
    gpuErrchk(cudaMemcpy(d_x_dist, x, numNodes*sizeof(*d_x_dist), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_y_dist, y, numNodes*sizeof(*d_y_dist), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_z_dist, z, numNodes*sizeof(*d_z_dist), cudaMemcpyHostToDevice));

    // Global dense matrix starts at zero; the kernel accumulates into it.
    gpuErrchk(cudaMemset(d_A_dense, 0, numNodes*dim*numNodes*dim*sizeof(*d_A_dense)));
    gpuErrchk(cudaMemcpy(dev_numNodes, &numNodes, 1 * sizeof(int), cudaMemcpyHostToDevice));

    // Launch configuration preserved from the original (192 blocks x 128 threads);
    // the kernel is expected to stride over all elements internally.
    make_K_cuda3d << < 192, 128 >> >(E_vector_device, nodesInElem_device, d_x_dist, d_y_dist, d_z_dist, displaceInElem_device, d_A_dense, dev_numNodes);
    gpuErrchk(cudaGetLastError());  // fix: launch errors were previously ignored
}
//2D
void Geometry::Linear2DBarycentric_B_CUDA_host(){
    // Host wrapper for 2D global-stiffness assembly on the GPU: uploads nodal
    // coordinates, zeroes the global dense matrix, launches make_K_cuda2d
    // (which scatters element contributions into d_A_dense) and copies the
    // assembled matrix back to h_A_dense for the host-side solver path.
    //
    // Fixes vs. previous revision:
    //   - CUDA API calls and the kernel launch were unchecked (errors silently
    //     dropped); all calls now go through gpuErrchk + cudaGetLastError().
    //   - cudaMemset was passed 0.0 (a double) for its int value parameter;
    //     now an explicit 0.
    //   - removed dead locals (unused dim3 configs and an unused
    //     cudaGetDeviceProperties query).
    gpuErrchk(cudaMemcpy(d_x, x, numNodes*sizeof(double), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_y, y, numNodes*sizeof(double), cudaMemcpyHostToDevice));

    // Global dense matrix starts at zero; the kernel accumulates into it.
    gpuErrchk(cudaMemset(d_A_dense, 0, numNodes*dim*numNodes*dim*sizeof(*d_A_dense)));

    // Launch configuration preserved from the original (147 blocks x 112 threads);
    // the kernel is expected to stride over all elements internally.
    make_K_cuda2d << <147,112 >> >(E_vector_device, nodesInElem_device, d_x, d_y, displaceInElem_device, d_A_dense, numNodes, thickness, Young, Poisson, c_alpha, beta_1, beta_2, density, dt, c_xi, numE);
    gpuErrchk(cudaGetLastError());  // fix: launch errors were previously ignored

    // Bring the assembled global matrix back to the host (blocking; also
    // surfaces any asynchronous kernel execution error).
    gpuErrchk(cudaMemcpy(h_A_dense, d_A_dense, numNodes*dim*numNodes*dim*sizeof(*d_A_dense), cudaMemcpyDeviceToHost));
}
void Geometry::make_surface_f(){
	// Intentionally empty stub: surface-force assembly is not implemented here.
	// Kept so existing call sites continue to compile/link -- TODO(review):
	// implement or remove once confirmed unused.
}
void Geometry::ApplyEssentialBoundaryConditionsBarycentric(int numP, int numBC, int *localcoord, int *elemForce, double forceVec_x, double forceVec_y, double *f, double **K, int **nodesInElem, double thickness, double *x, double *y, int **displaceInElem){
	// Converts an edge traction on one triangular element into equivalent nodal
	// forces accumulated into f: each of the two edge nodes receives
	// (edge_length * thickness / 2) * forceVec in x and y.
	//
	// NOTE(review): despite the name, K is never modified (that code was
	// commented out), only a single hard-coded edge is processed (i = 0,
	// local = 1); the per-BC loop over numBC and the localcoord lookup are
	// commented out in the original. Behavior is preserved exactly.
	// NOTE(review): elemForce[0] is overwritten with numBC and then used as the
	// element index -- presumably numBC doubles as the element id here; TODO
	// confirm against callers.
	elemForce[0] = numBC;
	int i = 0;
	int local = 1; // local edge id: the edge opposite barycentric node `local`
	// The two element-local nodes spanning the loaded edge.
	int node_interest[2];
	switch (local) {
	case 0:  node_interest[0] = 1; node_interest[1] = 2; break; // opposite xi_1
	case 1:  node_interest[0] = 0; node_interest[1] = 2; break; // opposite xi_2
	default: node_interest[0] = 0; node_interest[1] = 1; break; // opposite xi_3
	}
	int element = elemForce[i];
	// Edge endpoint coordinates and length (previously duplicated per branch;
	// hoisting also guarantees `length` is always initialized).
	double x_1 = x[nodesInElem[element][node_interest[0]]];
	double y_1 = y[nodesInElem[element][node_interest[0]]];
	double x_2 = x[nodesInElem[element][node_interest[1]]];
	double y_2 = y[nodesInElem[element][node_interest[1]]];
	double length = sqrt(pow(x_1 - x_2, 2.0) + pow(y_1 - y_2, 2.0));
	//cout << endl << "length: " << length << endl;
	for (int node_c = 0; node_c < 2; node_c++){
		int node = nodesInElem[element][node_interest[node_c]];
		for (int dof = 0; dof < 2; dof++){
			int row = displaceInElem[node][dof];
			// Consistent nodal load: half of the edge traction to each node.
			if (dof == 0){
				f[row] = f[row] + (length*thickness / 2)*forceVec_x;
			}
			else {
				f[row] = f[row] + (length*thickness / 2)*forceVec_y;
			}
		}
	}
}
void Geometry::ApplySudoForcesBarycentric(int numP, int node_applied, int *localcoord, int *elemForce, double forceVec_x, double forceVec_y, double *g, int **nodesInElem, double thickness, double *x, double *y, int **displaceInElem){
	// Builds the "pseudo" (interaction) load vector: zeroes the member load
	// vector f (length numP), then adds the four stored pseudo-force pairs
	// sudo_force_value1..4 at the nodes listed in sudo_force_index[0..3].
	//
	// NOTE(review): this writes the MEMBER vector f, not the parameter g; and
	// most parameters (node_applied, localcoord, elemForce, forceVec_x/y,
	// nodesInElem, thickness, x, y) are unused in the active path -- confirm
	// the intended output buffer against callers.
	int local; // unused in the active code path
	int node_interest[2];// use two ints to tell us which 2 of the nodes in the element would be useful
	int row, col;
	int element; // unused in the active code path
	int node; // unused in the active code path
	double length; // unused in the active code path
	double x_1, y_1, x_2, y_2; // unused in the active code path
	// Reset the global load vector before accumulating the pseudo forces.
	for (int dummy_V = 0; dummy_V < numP; dummy_V++){
		f[dummy_V] = 0;
	}
#if 1
	//cout << endl << "length: " << length << endl;
	//int node_c = node_applied;
	double f1, f2;
	// Hard-coded to exactly four pseudo forces (see the author's note below).
	for (int findex = 0; findex < 4; findex++){
		int node_c = sudo_force_index[findex];
		//******************************THIS NEEDS CHANGING **************************************//
		// Select the (fx, fy) pair for this pseudo-force slot.
		if (findex == 0){
			f1 = sudo_force_value1[0];
			f2 = sudo_force_value1[1];
		}
		else if (findex == 1){
			f1 = sudo_force_value2[0];
			f2 = sudo_force_value2[1];
		}
		else if (findex ==2){
			f1 = sudo_force_value3[0];
			f2 = sudo_force_value3[1];
		}
		else if (findex == 3){
			f1 = sudo_force_value4[0];
			f2 = sudo_force_value4[1];
		}
		/*double forceVec_x1 = sudo_force_value2[0];
		double forceVec_y1 = sudo_force_value2[1];*/
		// Scatter the selected force into the global DOF rows of node node_c.
		for (int dof = 0; dof < dim; dof++){
			row = displaceInElem[node_c][dof];
			// Empty loop kept from an earlier, now-disabled K-modification step.
			for (int dummy_V = 0; dummy_V < numP; dummy_V++){
				//K[row][dummy_V] = 0;
			}
			//K[row][row] = 1;
			if (dof == 0){
				f[row] += f1;
			}
			else if (dof == 1){
				f[row] += f2;
			}
			else if (dof == 2){
				// z-component intentionally not applied (3-D case disabled).
				// f[row] += forceVec_y1;
			}
		}
	}
#endif // 0
}
void Geometry::set_zero_nodes(int *points){
	// Record which nodes are pinned to zero displacement; set_zero_AxB() later
	// rewrites the corresponding rows of the system matrix and RHS vector.
	int idx = 0;
	while (idx < numNodesZero) {
		vector_zero_nodes[idx] = points[idx];
		++idx;
	}
}
void Geometry::set_zero_AxB(void){
	// Imposes zero-displacement (fixed) boundary conditions for every node
	// registered via set_zero_nodes(): both DOF rows of the system are cleared,
	// the diagonal entries are set to 1, and the RHS entries are zeroed, so the
	// solve returns u = 0 at those DOFs.
	// NOTE(review): whether IDX2C(col, rowK, N) sweeps a matrix row or column
	// depends on the IDX2C macro definition (declared elsewhere) -- confirm the
	// intended orientation.
	int row1, row2;
#if 1
	// Active path: constrain the matrix L and the RHS vector b_rhs.
	for (int i = 0; i < numNodesZero; i++){
		// Global DOF indices (x and y) of the i-th constrained node.
		row1 = displaceInElem[vector_zero_nodes[i]][0];
		row2 = displaceInElem[vector_zero_nodes[i]][1];
		for (int col = 0; col < Ncols; col++){
			L[IDX2C(col, row1, N)] = 0.0;
			L[IDX2C(col, row2, N)] = 0.0;
		}
		// Unit diagonal keeps the system non-singular at constrained DOFs.
		L[IDX2C(row1, row1, N)] = 1.0;
		L[IDX2C(row2, row2, N)] = 1.0;
		b_rhs[row1] = 0.0;
		b_rhs[row2] = 0.0;
	}
#else
	// Disabled variant: apply the same constraints directly to the dense
	// stiffness matrix h_A_dense instead of L.
	for (int i = 0; i < numNodesZero; i++){
		row1 = displaceInElem[vector_zero_nodes[i]][0];
		row2 = displaceInElem[vector_zero_nodes[i]][1];
		for (int col = 0; col < Ncols; col++){
			h_A_dense[IDX2C(col, row1, N)] = 0.0;
			h_A_dense[IDX2C(col, row2, N)] = 0.0;
		}
		h_A_dense[IDX2C(row1, row1, N)] = 1.0;
		h_A_dense[IDX2C(row2, row2, N)] = 1.0;
		/*f[row1] = 0.0;
		f[row2] = 0.0;*/
		b_rhs[row1] = 0.0;
		b_rhs[row2] = 0.0;
	}
#endif // 0
#if 0
	// Disabled experiment: prescribed-displacement RHS (Newmark-style update)
	// for the first pseudo-force node.
	row1 = displaceInElem[sudo_force_index[0]][0];
	row2 = displaceInElem[sudo_force_index[0]][1];
	for (int col = 0; col < Ncols; col++){
		L[IDX2C(col, row1, N)] = 0.0;
		L[IDX2C(col, row2, N)] = 0.0;
	}
	L[IDX2C(row1, row1, N)] = 1.0;
	L[IDX2C(row2, row2, N)] = 1.0;
	b_rhs[row1] = (1.0 / beta_2)*((sudo_force_value1[0] - dt*u_dot[row1]) - (1.0 - beta_2)*u_doubledot_old[row1]);
	b_rhs[row2] = (1.0 / beta_2)*((sudo_force_value1[1] - dt*u_dot[row2]) - (1.0 - beta_2)*u_doubledot_old[row2]);;
#endif // 0
}
void Geometry::initialize_CUDA(void){
	// One-time cuSPARSE/device setup for the Nrows x Ncols (= numNodes*dim)
	// linear system solved in tt(): creates the cuSPARSE handle, the matrix
	// descriptors for A (general) and its lower Cholesky factor L, allocates
	// the persistent per-row nnz buffers and the CSR row-pointer array, and
	// creates the csric02/csrsv2 info objects reused across solves.
	Nrows = numNodes*dim;                       // --- Number of rows
	Ncols = numNodes*dim;                       // --- Number of columns
	N = Nrows;
	cusparseSafeCall(cusparseCreate(&handle));
	//h_A_dense = (float*)malloc(Nrows*Ncols*sizeof(*h_A_dense));
	// Descriptor for the general sparse matrix A (one-based indexing).
	cusparseSafeCall(cusparseCreateMatDescr(&descrA));
	cusparseSafeCall(cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL));
	cusparseSafeCall(cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE));
	nnz = 0;                                    // --- Number of nonzero elements in dense matrix
	lda = Nrows;                                // --- Leading dimension of dense matrix
	gpuErrchk(cudaMalloc(&d_nnzPerVector, Nrows * sizeof(*d_nnzPerVector)));
	h_nnzPerVector = (int *)malloc(Nrows * sizeof(*h_nnzPerVector));
	// CSR row pointers for the device-side sparse matrix.
	gpuErrchk(cudaMalloc(&d_A_RowIndices, (Nrows + 1) * sizeof(*d_A_RowIndices)));
	// Fix: use sizeof(*dev_numNodes) (the pointee) rather than
	// sizeof(dev_numNodes) (the pointer, typically 8 bytes), which
	// over-allocated and made the memcpy read past &numNodes on the host.
	cudaMalloc((void **)&dev_numNodes, sizeof(*dev_numNodes));
	cudaMemcpy(dev_numNodes, &numNodes, sizeof(*dev_numNodes), cudaMemcpyHostToDevice);
	//cudaMemcpy(&numNodes,dev_numNodes , sizeof(dev_numNodes), cudaMemcpyDeviceToHost);
	// Descriptor for the lower-triangular Cholesky factor.
	cusparseSafeCall(cusparseCreateMatDescr(&descr_L));
	cusparseSafeCall(cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ONE));
	cusparseSafeCall(cusparseSetMatType(descr_L, CUSPARSE_MATRIX_TYPE_GENERAL));
	cusparseSafeCall(cusparseSetMatFillMode(descr_L, CUSPARSE_FILL_MODE_LOWER));
	cusparseSafeCall(cusparseSetMatDiagType(descr_L, CUSPARSE_DIAG_TYPE_NON_UNIT));
	// Analysis/solve info objects for incomplete Cholesky (csric02) and the
	// two triangular solves (L and L^T).
	cusparseSafeCall(cusparseCreateCsric02Info(&info_A));
	cusparseSafeCall(cusparseCreateCsrsv2Info(&info_L));
	cusparseSafeCall(cusparseCreateCsrsv2Info(&info_Lt));
}
int Geometry::tt()
{
	// Solves K * u = f on the GPU with cuSPARSE: the dense system matrix
	// (h_A_dense / d_A_dense) is converted to CSR, factorized with incomplete
	// Cholesky (csric02), and the two triangular systems L z = f and L' u = z
	// are solved; the displacement increment u is then added to the nodal
	// coordinates x/y(/z). Requires initialize_CUDA() to have been called.
	// Returns 0.
	double duration_K;
	// --- Column-major ordering (see IDX2C usage below).
	if (!cuda_use){
		// CPU-assembled path: impose the essential BCs on the host matrix and
		// upload it (the GPU-assembled path already has d_A_dense populated).
		for (int col = 0; col < Ncols; col++){
			h_A_dense[IDX2C(col, 0, N)] = 0;
			h_A_dense[IDX2C(col, 1, N)] = 0;
			if (dim == 3){
				h_A_dense[IDX2C(col, 2, N)] = 0;
			}
		}
		h_A_dense[IDX2C(0, 0, N)] = 1.0;
		h_A_dense[IDX2C(1, 1, N)] = 1.0;
		if (dim == 3){
			h_A_dense[IDX2C(2, 2, N)] = 1.0;
		}
		set_zero_AxB();
		gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense, Nrows * Ncols * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
	}
#if 0
	// Debug dump of the global matrix.
	std::ofstream writenodes("global_K.txt");
	for (int j = 0; j < N; j++){
		for (int i = 0; i < N; i++){
			writenodes << h_A_dense[IDX2C(j, i, N)] << " ";
		}
		writenodes << std::endl;
	}
	writenodes.close();
#endif // 0
	// --- Count nonzeros per row of the dense device matrix.
	cusparseSafeCall(cusparseSnnz(handle, CUSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, &nnz));
	gpuErrchk(cudaMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), cudaMemcpyDeviceToHost));
	// --- Convert dense -> CSR. d_A / d_A_ColIndices are class members that
	// must be re-allocated each call because nnz can change; they are now
	// freed at the end of this function (previously leaked every call).
	gpuErrchk(cudaMalloc(&d_A, nnz * sizeof(*d_A)));
	gpuErrchk(cudaMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)));
	cusparseSafeCall(cusparseSdense2csr(handle, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices));
	std::clock_t start_K;
	start_K = std::clock();
	// --- Host-side CSR copies (kept only for debugging/inspection).
	float *h_A = (float *)malloc(nnz * sizeof(*h_A));
	int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices));
	int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices));
	gpuErrchk(cudaMemcpy(h_A, d_A, nnz*sizeof(*h_A), cudaMemcpyDeviceToHost));
	gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost));
	gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost));
	std::cout << nnz << std::endl;
	// --- Right-hand side: copy the load vector f (double -> float) and zero
	// the constrained DOFs of the first node.
	float *h_x = (float *)malloc(Nrows * sizeof(float));
	for (int i = 0; i < N; i++){
		h_x[i] = f[i];
	}
	if (dim == 3){
		h_x[0] = h_x[1] = h_x[2] = 0;
	}
	else {
		h_x[0] = h_x[1] = 0;
	}
	float *d_x; gpuErrchk(cudaMalloc(&d_x, Nrows * sizeof(float)));
	gpuErrchk(cudaMemcpy(d_x, h_x, Nrows * sizeof(float), cudaMemcpyHostToDevice));
	/********************************************************************************************************/
	/* STEP 2: QUERY HOW MUCH MEMORY USED IN CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
	/********************************************************************************************************/
	int pBufferSize_M, pBufferSize_L, pBufferSize_Lt;
	cusparseSafeCall(cusparseScsric02_bufferSize(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, &pBufferSize_M));
	cusparseSafeCall(cusparseScsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, &pBufferSize_L));
	cusparseSafeCall(cusparseScsrsv2_bufferSize(handle, CUSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, &pBufferSize_Lt));
	int pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_Lt));
	void *pBuffer = 0; gpuErrchk(cudaMalloc((void**)&pBuffer, pBufferSize));
	/******************************************************************************************************/
	/* STEP 3: ANALYZE THE THREE PROBLEMS: CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
	/******************************************************************************************************/
	int structural_zero;
	cusparseSafeCall(cusparseScsric02_analysis(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	cusparseStatus_t status = cusparseXcsric02_zeroPivot(handle, info_A, &structural_zero);
	if (CUSPARSE_STATUS_ZERO_PIVOT == status){ printf("A(%d,%d) is missing\n", structural_zero, structural_zero); }
	cusparseSafeCall(cusparseScsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	cusparseSafeCall(cusparseScsrsv2_analysis(handle, CUSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
	/*************************************/
	/* STEP 4: FACTORIZATION: A = L * L' */
	/*************************************/
	int numerical_zero;
	cusparseSafeCall(cusparseScsric02(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	status = cusparseXcsric02_zeroPivot(handle, info_A, &numerical_zero);
	/*if (CUSPARSE_STATUS_ZERO_PIVOT == status){ printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero); }
	*/
	gpuErrchk(cudaMemcpy(h_A, d_A, nnz * sizeof(float), cudaMemcpyDeviceToHost));
	cusparseSafeCall(cusparseScsr2dense(handle, Nrows, Ncols, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_A_dense, Nrows));
	/*********************/
	/* STEP 5: L * z = x */
	/*********************/
	// --- Intermediate result vector for the forward substitution.
	float *d_z; gpuErrchk(cudaMalloc(&d_z, N * sizeof(float)));
	const float alpha = 1.;
	cusparseSafeCall(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, d_x, d_z, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
	/**********************/
	/* STEP 5: L' * y = z */
	/**********************/
	float *h_y = (float *)malloc(Ncols * sizeof(float));
	float *d_y; gpuErrchk(cudaMalloc(&d_y, Ncols * sizeof(float)));
	cusparseSafeCall(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, d_z, d_y, CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
	cudaMemcpy(h_x, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
	printf("\n\nFinal result\n");
	// --- Apply the displacement increment to the nodal coordinates.
	for (int i = 0; i < numNodes; i++) {
		x[i] = x[i] + h_x[i * dim];
		y[i] = y[i] + h_x[i * dim + 1];
		if (dim == 3){
			z[i] = z[i] + h_x[i * dim + 2];
		}
	}
	// --- Cleanup. Fix: h_x, d_A and d_A_ColIndices were previously leaked on
	// every call (the commented-out reference version of this routine freed
	// them). The member pointers d_A / d_A_ColIndices are re-allocated at the
	// top of the next call -- TODO(review): confirm no other code reads them
	// between calls or frees them again (e.g. in a destructor).
	free(h_A);
	free(h_A_RowIndices);
	free(h_A_ColIndices);
	free(h_x);
	free(h_y);
	cudaFree(d_x);
	cudaFree(pBuffer);
	cudaFree(d_z);
	cudaFree(d_y);
	cudaFree(d_A);
	cudaFree(d_A_ColIndices);
	duration_K = (std::clock() - start_K) / (double)CLOCKS_PER_SEC;
	//std::cout << "Duration: " << duration_K << std::endl;
	return 0;
}
//int Geometry::tt()
//{
// // --- Initialize cuSPARSE
// cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
//
// const int Nrows = numNodes*dim; // --- Number of rows
// const int Ncols = numNodes*dim; // --- Number of columns
// const int N = Nrows;
//
// // --- Host side dense matrix
// double *h_A_dense = (double*)malloc(Nrows*Ncols*sizeof(*h_A_dense));
//
// // --- Column-major ordering
// /*h_A_dense[0] = 0.4612f; h_A_dense[4] = -0.0006f; h_A_dense[8] = 0.3566f; h_A_dense[12] = 0.0f;
// h_A_dense[1] = -0.0006f; h_A_dense[5] = 0.4640f; h_A_dense[9] = -1000.0723f; h_A_dense[13] = 0.0f;
// h_A_dense[2] = 0.3566f; h_A_dense[6] = 0.0723f; h_A_dense[10] = 100.7543f; h_A_dense[14] = 0.0f;
// h_A_dense[3] = 0.f; h_A_dense[7] = 0.0f; h_A_dense[11] = 0.0f; h_A_dense[15] = 0.1f;
// */
// for (int col = 0; col < Ncols; col++){
// for (int row = 0; row < Nrows; row++){
//
// h_A_dense[IDX2C(col, row, N)] = K[col][row];
// //a[IDX2C(col, row, n)] = (double)ind++;
// //h_A_dense[IDX2C(col, row, N)] = 0;
//
//
// }
//
// }
// for (int col = 0; col < Ncols; col++){
//
// h_A_dense[IDX2C(col, 0, N)] = 0;
// h_A_dense[IDX2C(col, 1, N)] = 0;
// }
// h_A_dense[IDX2C(0, 0, N)] = 1;
// h_A_dense[IDX2C(1, 1, N)] = 1;
//
//
// // --- Create device array and copy host array to it
// double *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, Nrows * Ncols * sizeof(*d_A_dense)));
// gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense, Nrows * Ncols * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
//
// // --- Descriptor for sparse matrix A
// cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
// cusparseSafeCall(cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL));
// cusparseSafeCall(cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE));
//
// int nnz = 0; // --- Number of nonzero elements in dense matrix
// const int lda = Nrows; // --- Leading dimension of dense matrix
// // --- Device side number of nonzero elements per row
// int *d_nnzPerVector; gpuErrchk(cudaMalloc(&d_nnzPerVector, Nrows * sizeof(*d_nnzPerVector)));
// cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, &nnz));
// // --- Host side number of nonzero elements per row
// int *h_nnzPerVector = (int *)malloc(Nrows * sizeof(*h_nnzPerVector));
// gpuErrchk(cudaMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), cudaMemcpyDeviceToHost));
//
// /*printf("Number of nonzero elements in dense matrix = %i\n\n", nnz);
// for (int i = 0; i < 10; ++i) printf("Number of nonzero elements in row %i = %i \n", i, h_nnzPerVector[i]);
// printf("\n");*/
//
// // --- Device side dense matrix
// double *d_A; gpuErrchk(cudaMalloc(&d_A, nnz * sizeof(*d_A)));
// int *d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (Nrows + 1) * sizeof(*d_A_RowIndices)));
// int *d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)));
//
// cusparseSafeCall(cusparseDdense2csr(handle, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices));
//
// // --- Host side dense matrix
// double *h_A = (double *)malloc(nnz * sizeof(*h_A));
// int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices));
// int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices));
// gpuErrchk(cudaMemcpy(h_A, d_A, nnz*sizeof(*h_A), cudaMemcpyDeviceToHost));
// gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost));
// gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost));
//
// /*printf("\nOriginal matrix in CSR format\n\n");
// for (int i = 0; i < 10; ++i) printf("A[%i] = %.0f ", i, h_A[i]); printf("\n");
//
// printf("\n");
// for (int i = 0; i < (10 + 1); ++i) printf("h_A_RowIndices[%i] = %i \n", i, h_A_RowIndices[i]); printf("\n");
//
// for (int i = 0; i < 10; ++i) printf("h_A_ColIndices[%i] = %i \n", i, h_A_ColIndices[i]);
// */
// // --- Allocating and defining dense host and device data vectors
// double *h_x = (double *)malloc(Nrows * sizeof(double));
// /*h_x[0] = 100.0; h_x[1] = 200.0; h_x[2] = 400.0; h_x[3] = 500.0;*/
// for (int i = 0; i < N; i++){
// h_x[i] = f[i];
// }
// h_x[0] = h_x[1] = 0;
//
// double *d_x; gpuErrchk(cudaMalloc(&d_x, Nrows * sizeof(double)));
// gpuErrchk(cudaMemcpy(d_x, h_x, Nrows * sizeof(double), cudaMemcpyHostToDevice));
//
// /******************************************/
// /* STEP 1: CREATE DESCRIPTORS FOR L AND U */
// /******************************************/
// cusparseMatDescr_t descr_L = 0;
// cusparseSafeCall(cusparseCreateMatDescr(&descr_L));
// cusparseSafeCall(cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ONE));
// cusparseSafeCall(cusparseSetMatType(descr_L, CUSPARSE_MATRIX_TYPE_GENERAL));
// cusparseSafeCall(cusparseSetMatFillMode(descr_L, CUSPARSE_FILL_MODE_LOWER));
// cusparseSafeCall(cusparseSetMatDiagType(descr_L, CUSPARSE_DIAG_TYPE_NON_UNIT));
//
// /********************************************************************************************************/
// /* STEP 2: QUERY HOW MUCH MEMORY USED IN CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
// /********************************************************************************************************/
// csric02Info_t info_A = 0; cusparseSafeCall(cusparseCreateCsric02Info(&info_A));
// csrsv2Info_t info_L = 0; cusparseSafeCall(cusparseCreateCsrsv2Info(&info_L));
// csrsv2Info_t info_Lt = 0; cusparseSafeCall(cusparseCreateCsrsv2Info(&info_Lt));
//
// int pBufferSize_M, pBufferSize_L, pBufferSize_Lt;
// cusparseSafeCall(cusparseDcsric02_bufferSize(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, &pBufferSize_M));
// cusparseSafeCall(cusparseDcsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, &pBufferSize_L));
// cusparseSafeCall(cusparseDcsrsv2_bufferSize(handle, CUSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, &pBufferSize_Lt));
//
// int pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_Lt));
// void *pBuffer = 0; gpuErrchk(cudaMalloc((void**)&pBuffer, pBufferSize));
//
// /******************************************************************************************************/
// /* STEP 3: ANALYZE THE THREE PROBLEMS: CHOLESKY FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */
// /******************************************************************************************************/
// int structural_zero;
//
// cusparseSafeCall(cusparseDcsric02_analysis(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
//
// cusparseStatus_t status = cusparseXcsric02_zeroPivot(handle, info_A, &structural_zero);
// if (CUSPARSE_STATUS_ZERO_PIVOT == status){ printf("A(%d,%d) is missing\n", structural_zero, structural_zero); }
//
// cusparseSafeCall(cusparseDcsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
// cusparseSafeCall(cusparseDcsrsv2_analysis(handle, CUSPARSE_OPERATION_TRANSPOSE, N, nnz, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
//
// /*************************************/
// /* STEP 4: FACTORIZATION: A = L * L' */
// /*************************************/
// int numerical_zero;
//
// cusparseSafeCall(cusparseDcsric02(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, info_A, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
// status = cusparseXcsric02_zeroPivot(handle, info_A, &numerical_zero);
// /*if (CUSPARSE_STATUS_ZERO_PIVOT == status){ printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero); }
// */
//
// gpuErrchk(cudaMemcpy(h_A, d_A, nnz * sizeof(double), cudaMemcpyDeviceToHost));
// /*printf("\nNon-zero elements in Cholesky matrix\n\n");
// for (int k = 0; k<10; k++) printf("%f\n", h_A[k]);*/
//
// cusparseSafeCall(cusparseDcsr2dense(handle, Nrows, Ncols, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_A_dense, Nrows));
//
// /*printf("\nCholesky matrix\n\n");
// for (int i = 0; i < 10; i++) {
// std::cout << "[ ";
// for (int j = 0; j < 10; j++)
// std::cout << h_A_dense[i * Ncols + j] << " ";
// std::cout << "]\n";
// }*/
//
// /*********************/
// /* STEP 5: L * z = x */
// /*********************/
// // --- Allocating the intermediate result vector
// double *d_z; gpuErrchk(cudaMalloc(&d_z, N * sizeof(double)));
//
// const double alpha = 1.;
// cusparseSafeCall(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_L, d_x, d_z, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer));
//
// /**********************/
// /* STEP 5: L' * y = z */
// /**********************/
// // --- Allocating the host and device side result vector
// double *h_y = (double *)malloc(Ncols * sizeof(double));
// double *d_y; gpuErrchk(cudaMalloc(&d_y, Ncols * sizeof(double)));
//
// cusparseSafeCall(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_TRANSPOSE, N, nnz, &alpha, descr_L, d_A, d_A_RowIndices, d_A_ColIndices, info_Lt, d_z, d_y, CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer));
//
// cudaMemcpy(h_x, d_y, N * sizeof(double), cudaMemcpyDeviceToHost);
// /*printf("\n\nFinal result\n");
// for (int k = 0; k<10; k++) printf("x[%i] = %f\n", k, h_x[k]);
// */
// for (int i = 0; i < numNodes; i++) {
// x[i] = x[i] + h_x[i * 2];
// y[i] = y[i] + h_x[i * 2 + 1];
// }
// cudaFree(d_A_dense);
// cudaFree(d_nnzPerVector);
// cudaFree(d_A);
// cudaFree(d_A_RowIndices);
// cudaFree(d_A_ColIndices);
// cudaFree(d_x);
// cudaFree(pBuffer);
// cudaFree(d_z);
// cudaFree(d_y);
//
// free(h_nnzPerVector);
//
// free(h_A_dense);
//
// free(h_A);
// free(h_A_RowIndices);
// free(h_A_ColIndices);
// free(h_x);
// free(h_y);
//
// return 0;
//} |
51f180de063730f98114ff93060077d1dfd45d4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pairwise_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ForwardGPU(const int nthreads, const int num, const Dtype* similarity,
    const Dtype* exp_product, const Dtype* product, const Dtype threshold, Dtype* count, Dtype* loss_data) {
  // Per-pair loss: when thresholding is enabled (threshold >= 0) and the
  // product is large, use the linear surrogate product*(1-s); otherwise the
  // exact logistic loss log(1 + e^product) - s*product, where s indicates a
  // positive label similarity.
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype prod = product[index];
    const Dtype sim = (similarity[index] > 0) ? Dtype(1) : Dtype(0);
    count[index] = Dtype(1.0);
    const bool clipped = (threshold >= 0) && (prod >= threshold);
    loss_data[index] = clipped
        ? prod * (Dtype(1) - sim)
        : log(1 + exp_product[index]) - sim * prod;
  }
}
template <typename Dtype>
__global__ void TSNEProduct(const int nthreads, const int outer_num, const int inner_num, const Dtype* input1, const Dtype* input2, Dtype* tproduct, Dtype* out){
  // For every ordered pair (a, b) of the outer_num samples, computes the
  // squared Euclidean distance between their inner_num-dim feature vectors
  // (stored in tproduct) and a Student-t style similarity
  // (inner_num/2) / (1 + d^2) (stored in out).
  CUDA_KERNEL_LOOP(index, nthreads){
    const int a = index / outer_num;
    const int b = index % outer_num;
    Dtype dist2 = 0;
    for (int k = 0; k < inner_num; ++k){
      const Dtype delta = input1[a * inner_num + k] - input2[b * inner_num + k];
      dist2 += delta * delta;
    }
    tproduct[index] = dist2;
    // NOTE: inner_num / 2 is integer division, matching the backward kernels.
    out[index] = (inner_num / 2) / (1.0 + dist2);
  }
}
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // GPU forward pass of the pairwise loss.
  //   bottom[0]: features, outer_num_ x inner_num_
  //   bottom[1]: labels,   outer_num_ x label_dim_ -- assumed multi-hot so
  //              that a positive entry of label*label^T marks a similar pair;
  //              TODO(review) confirm label encoding.
  //   top[0]:    scalar loss averaged over all outer_num_^2 ordered pairs.
  Dtype* similarity = pairwise_sim_.mutable_gpu_data();
  Dtype* dot_product = pairwise_sim_.mutable_gpu_diff(); // per-pair similarity product
  Dtype* exp_product = loss_.mutable_gpu_diff();         // exp(product), reused in backward
  Dtype* loss_data = loss_.mutable_gpu_data();           // per-pair loss terms
  Dtype* count = temp_.mutable_gpu_data();               // per-pair weight (set to 1)
  Dtype* label = bottom[1]->mutable_gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  int nthreads = outer_num_ * outer_num_;
  //calculate similarity matrix according to label: similarity = label * label^T
  caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_, 
      Dtype(1.0), label, label, Dtype(0.0), similarity);
  Dtype* tproduct = tsne_.mutable_gpu_data();            // per-pair squared distances
  // Student-t style similarity of every feature pair; also fills tproduct
  // (reused by Backward_gpu).
  hipLaunchKernelGGL(( TSNEProduct<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
        dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, inner_num_, bottom_data, bottom_data, tproduct, dot_product);
  caffe_gpu_exp(outer_num_ * outer_num_, dot_product, exp_product);
  //calculate pairwise loss
  hipLaunchKernelGGL(( ForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, similarity, exp_product, 
      dot_product, threshold_, count, loss_data);
  Dtype loss, count_num;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  caffe_gpu_asum(nthreads, count, &count_num);
  // count is 1 for every pair, so count_num is the number of pairs.
  loss /= (count_num > 0 ? count_num : Dtype(1));
  top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void BackwardGPU(const int nthreads, const int outer_num, const int inner_num,
    const Dtype* similarity, const Dtype* exp_product, const Dtype* tproduct, const Dtype threshold, Dtype* count, Dtype* diff) {
  // Gradient of the pairwise loss w.r.t. the per-pair similarity product,
  // mirroring the two branches of ForwardGPU.
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Recompute product = (inner_num/2) / (1 + d^2), as produced by TSNEProduct.
    const Dtype prod = (inner_num / 2) / (1. + tproduct[index]);
    // Fix: the (threshold >= 0) guard used in ForwardGPU was missing here, so
    // with thresholding disabled (threshold < 0) the backward pass always took
    // the linear branch while the forward pass never did. The comparison is
    // also aligned with the forward pass (>= instead of >).
    if ((threshold >= 0) && (prod >= threshold)) {
      // Gradient of the linear surrogate product * (1 - s).
      diff[index] = 1.0 * (1 - (similarity[index] > 0));
    }
    else {
      // d/dx [log(1+e^x) - s*x] = sigmoid(x) - s, with sigmoid written as
      // 1 / (1 + e^-x) = 1 / (1 + 1/e^x).
      diff[index] = (
        1 / (1 + 1 / exp_product[index]) -
        (similarity[index] > 0));
    }
    // Average over all outer_num^2 pairs.
    diff[index] /= Dtype(outer_num * outer_num);
    count[index] = Dtype(1.0);
  }
}
template <typename Dtype>
__global__ void TSNEBackward31(const int nthreads, const Dtype scale, const int outer_num, const int inner_num, const Dtype* top_diff, const Dtype* tproduct, const Dtype* similarity, const Dtype threshold, const Dtype* input, Dtype* diff){
  // Chain rule through the Student-t similarity for each sample's role as the
  // FIRST element of a pair: accumulates d(loss)/d(input[index]) over all
  // partners i. (scale, similarity and threshold are unused here; kept for
  // signature parity with the companion kernel.)
  CUDA_KERNEL_LOOP(index, nthreads){
    const int sample = index / inner_num; // which sample this feature belongs to
    const int coord = index % inner_num;  // which feature coordinate
    for (int i = 0; i < outer_num; i++) {
      const int pair = sample * outer_num + i;
      const Dtype denom = 1.0 + tproduct[pair];
      // d/d(d^2) of (inner_num/2)/(1+d^2) = -(inner_num/2)/(1+d^2)^2
      const Dtype temp_diff = top_diff[pair] * (-(inner_num / 2) / (denom * denom));
      diff[index] += temp_diff * 2.0 * (input[index] - input[i * inner_num + coord]);
    }
  }
}
template <typename Dtype>
__global__ void TSNEBackward32(const int nthreads, const Dtype scale, const int outer_num, const int inner_num, const Dtype* top_diff, const Dtype* tproduct, const Dtype* similarity, const Dtype threshold, const Dtype* input, Dtype* diff){
  // Companion of TSNEBackward31 for each sample's role as the SECOND element
  // of a pair; together the two kernels cover both occurrences of a sample in
  // the pair matrix. (scale, similarity and threshold are unused here.)
  CUDA_KERNEL_LOOP(index, nthreads){
    const int sample = index / inner_num; // which sample this feature belongs to
    const int coord = index % inner_num;  // which feature coordinate
    for (int i = 0; i < outer_num; i++){
      const int pair = i * outer_num + sample;
      const Dtype denom = 1.0 + tproduct[pair];
      // d/d(d^2) of (inner_num/2)/(1+d^2) = -(inner_num/2)/(1+d^2)^2
      const Dtype temp_diff = top_diff[pair] * (-(inner_num / 2) / (denom * denom));
      diff[index] += temp_diff * 2.0 * (input[index] - input[i * inner_num + coord]);
    }
  }
}
template <typename Dtype>
__global__ void CleanBlob(const int nthreads, Dtype* clean_blob)
{
  // Zero-fills a device buffer of nthreads elements.
  CUDA_KERNEL_LOOP(i, nthreads){
    clean_blob[i] = Dtype(0);
  }
}
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // GPU backward pass: propagates the pairwise loss gradient to the features
  // (bottom[0]); gradients w.r.t. the labels (bottom[1]) are undefined.
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    Dtype* diff = temp_.mutable_gpu_data();      // d(loss)/d(product), per pair
    Dtype* count = temp_.mutable_gpu_diff();
    const Dtype* similarity = pairwise_sim_.gpu_data();
    const Dtype* exp_product = loss_.gpu_diff(); // exp(product) cached by forward
    const Dtype* bottom_data = bottom[0]->gpu_data();
    int nthreads = outer_num_ * outer_num_;
    //calculate diff
    hipLaunchKernelGGL(( BackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
        dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, inner_num_, similarity, 
        exp_product,tsne_.gpu_data(),threshold_, count, diff);
    //copy to bottom_diff
    Dtype count_num;
    caffe_gpu_asum(nthreads, count, &count_num);
    const Dtype* tproduct = tsne_.gpu_data();
    // Chain rule through the Student-t similarity; kernel 31 handles each
    // sample's role as the first pair element, kernel 32 as the second.
    // NOTE(review): both kernels accumulate into bottom_diff with += and
    // nothing here zeroes it first (CleanBlob is defined but unused) --
    // presumably the framework clears diffs before backward; TODO confirm.
    // NOTE(review): the loss_weight_/count scale argument is passed but not
    // used inside the kernels -- confirm the intended gradient scaling.
    hipLaunchKernelGGL(( TSNEBackward31<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
        dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_ * inner_num_, loss_weight_/ (count_num > 0? count_num : Dtype(1)), outer_num_, inner_num_, diff, tproduct, similarity, threshold_, bottom_data, bottom_diff);
    hipLaunchKernelGGL(( TSNEBackward32<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
        dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_ * inner_num_, loss_weight_/ (count_num > 0? count_num : Dtype(1)), outer_num_, inner_num_, diff, tproduct, similarity, threshold_, bottom_data, bottom_diff);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer);
} // namespace caffe
| 51f180de063730f98114ff93060077d1dfd45d4e.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pairwise_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ForwardGPU(const int nthreads, const int num, const Dtype* similarity,
const Dtype* exp_product, const Dtype* product, const Dtype threshold, Dtype* count, Dtype* loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
count[index] = Dtype(1.0);
if((threshold >= 0) && (product[index] >= threshold)){
loss_data[index] = product[index] * (1 - (similarity[index] > 0));
}
else{
loss_data[index] = log(1 + exp_product[index]) - (similarity[index] > 0) * product[index];
}
}
}
template <typename Dtype>
__global__ void TSNEProduct(const int nthreads, const int outer_num, const int inner_num, const Dtype* input1, const Dtype* input2, Dtype* tproduct, Dtype* out){
CUDA_KERNEL_LOOP(index, nthreads){
int data_id1 = index / outer_num;
int data_id2 = index % outer_num;
Dtype sum = 0;
for (int i = 0; i < inner_num; i++){
sum += (input1[data_id1 * inner_num + i] - input2[data_id2 * inner_num + i])*(input1[data_id1 * inner_num + i] - input2[data_id2 * inner_num + i]);
}
tproduct[index] = sum;
out[index] = (inner_num / 2) / (1.0 + sum);
}
}
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Dtype* similarity = pairwise_sim_.mutable_gpu_data();
Dtype* dot_product = pairwise_sim_.mutable_gpu_diff();
Dtype* exp_product = loss_.mutable_gpu_diff();
Dtype* loss_data = loss_.mutable_gpu_data();
Dtype* count = temp_.mutable_gpu_data();
Dtype* label = bottom[1]->mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
int nthreads = outer_num_ * outer_num_;
//calculate similarity matrix according to label
caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_,
Dtype(1.0), label, label, Dtype(0.0), similarity);
Dtype* tproduct = tsne_.mutable_gpu_data();
TSNEProduct<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, inner_num_, bottom_data, bottom_data, tproduct, dot_product);
caffe_gpu_exp(outer_num_ * outer_num_, dot_product, exp_product);
//calculate pairwise loss
ForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, similarity, exp_product,
dot_product, threshold_, count, loss_data);
Dtype loss, count_num;
caffe_gpu_asum(nthreads, loss_data, &loss);
caffe_gpu_asum(nthreads, count, &count_num);
loss /= (count_num > 0 ? count_num : Dtype(1));
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void BackwardGPU(const int nthreads, const int outer_num, const int inner_num,
const Dtype* similarity, const Dtype* exp_product,const Dtype* tproduct,const Dtype threshold, Dtype* count, Dtype* diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
if ((inner_num / 2)/(1. + tproduct[index])>threshold) {
diff[index] = 1.0 * (1- (similarity[index] > 0));
}
else {
diff[index] = (
1 / (1 + 1 / exp_product[index]) -
(similarity[index] > 0));
}
diff[index] /= Dtype(outer_num * outer_num);
count[index] = Dtype(1.0);
}
}
template <typename Dtype>
__global__ void TSNEBackward31(const int nthreads, const Dtype scale, const int outer_num, const int inner_num, const Dtype* top_diff, const Dtype* tproduct,const Dtype* similarity, const Dtype threshold, const Dtype* input, Dtype* diff){
CUDA_KERNEL_LOOP(index, nthreads){
for (int i = 0; i < outer_num; i++) {
Dtype temp_diff = top_diff[int(index / inner_num) * outer_num + i] * (-(inner_num / 2) / ((1.0+tproduct[int(index / inner_num) * outer_num + i])*(1.0+tproduct[int(index / inner_num) * outer_num + i])));
diff[index] += temp_diff * 2.0 * (input[index] - input[i * inner_num + index % inner_num]);
}
}
}
template <typename Dtype>
__global__ void TSNEBackward32(const int nthreads, const Dtype scale, const int outer_num, const int inner_num, const Dtype* top_diff, const Dtype* tproduct, const Dtype* similarity, const Dtype threshold, const Dtype* input, Dtype* diff){
CUDA_KERNEL_LOOP(index, nthreads){
for (int i = 0; i < outer_num; i++){
Dtype temp_diff = top_diff[i * outer_num + index / inner_num] * (-(inner_num / 2) / ((1.0+tproduct[i * outer_num + index / inner_num])*(1.0+tproduct[ i * outer_num + index / inner_num])));
diff[index] += temp_diff * 2.0 * (input[index] - input[i * inner_num + index % inner_num]);
}
}
}
template <typename Dtype>
__global__ void CleanBlob(const int nthreads, Dtype* clean_blob)
{
CUDA_KERNEL_LOOP(index, nthreads){
clean_blob[index] = Dtype(0);
}
}
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* diff = temp_.mutable_gpu_data();
Dtype* count = temp_.mutable_gpu_diff();
const Dtype* similarity = pairwise_sim_.gpu_data();
const Dtype* exp_product = loss_.gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
int nthreads = outer_num_ * outer_num_;
//calculate diff
BackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, inner_num_, similarity,
exp_product,tsne_.gpu_data(),threshold_, count, diff);
//copy to bottom_diff
Dtype count_num;
caffe_gpu_asum(nthreads, count, &count_num);
const Dtype* tproduct = tsne_.gpu_data();
TSNEBackward31<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_ * inner_num_, loss_weight_/ (count_num > 0? count_num : Dtype(1)), outer_num_, inner_num_, diff, tproduct, similarity, threshold_, bottom_data, bottom_diff);
TSNEBackward32<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_ * inner_num_, loss_weight_/ (count_num > 0? count_num : Dtype(1)), outer_num_, inner_num_, diff, tproduct, similarity, threshold_, bottom_data, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer);
} // namespace caffe
|
542c0662e3de072635a61236b4d8c7824979a87c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "custom_cuda_layers.h"
#include "general_kernels.h"
namespace cg = cooperative_groups;
// Fused attention + softmax
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(float* vals,
const float* attn_mask,
int heads,
int seq_length,
int iterations)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = ::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float4* val_cast = reinterpret_cast<float4*>(vals);
const float4* attn_mask_cast = reinterpret_cast<const float4*>(attn_mask);
float4 data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float4 mask = attn_mask_cast[mask_offset + data_id];
data[i] = val_cast[data_offset + data_id];
data[i].x += mask.x;
data[i].y += mask.y;
data[i].z += mask.z;
data[i].w += mask.w;
max_val = (data[i].x > max_val ? data[i].x : max_val);
max_val = (data[i].y > max_val ? data[i].y : max_val);
max_val = (data[i].z > max_val ? data[i].z : max_val);
max_val = (data[i].w > max_val ? data[i].w : max_val);
} else {
data[i].x = minus_infinity;
data[i].y = minus_infinity;
data[i].z = minus_infinity;
data[i].w = minus_infinity;
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride)
iters = warp_num / (iteration_stride / max_threads_in_sequence);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
data[i].x = __expf(data[i].x - max_val);
data[i].y = __expf(data[i].y - max_val);
data[i].z = __expf(data[i].z - max_val);
data[i].w = __expf(data[i].w - max_val);
sum += (data[i].x + data[i].y + data[i].z + data[i].w);
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride)
iters = warp_num / (iteration_stride / max_threads_in_sequence);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
data[i].x /= sum;
data[i].y /= sum;
data[i].z /= sum;
data[i].w /= sum;
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) val_cast[data_offset + data_id] = data[i];
}
}
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(__half* vals,
const __half* attn_mask,
int heads,
int seq_length,
int iterations)
{
#if __CUDA_ARCH__ >= 700
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = ::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float2* val_cast = reinterpret_cast<float2*>(vals);
const float2* attn_mask_cast = reinterpret_cast<const float2*>(attn_mask);
val_cast += data_offset;
attn_mask_cast += mask_offset;
float2 low_data[MAX_THREAD_ITERATIONS];
float2 high_data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 data = val_cast[data_id];
float2 mask = attn_mask_cast[data_id];
__half2* data_arr = reinterpret_cast<__half2*>(&data);
__half2* mask_arr = reinterpret_cast<__half2*>(&mask);
low_data[i] = __half22float2(data_arr[0]);
high_data[i] = __half22float2(data_arr[1]);
float2 low_mask = __half22float2(mask_arr[0]);
float2 high_mask = __half22float2(mask_arr[1]);
low_data[i].x += low_mask.x;
low_data[i].y += low_mask.y;
high_data[i].x += high_mask.x;
high_data[i].y += high_mask.y;
max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride)
iters = warp_num / (iteration_stride / max_threads_in_sequence);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
low_data[i].x = __expf(low_data[i].x - max_val);
low_data[i].y = __expf(low_data[i].y - max_val);
high_data[i].x = __expf(high_data[i].x - max_val);
high_data[i].y = __expf(high_data[i].y - max_val);
sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
}
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride)
iters = warp_num / (iteration_stride / max_threads_in_sequence);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 result_f;
__half2* result_h = reinterpret_cast<__half2*>(&result_f);
low_data[i].x /= sum;
low_data[i].y /= sum;
high_data[i].x /= sum;
high_data[i].y /= sum;
result_h[0] = __float22half2_rn(low_data[i]);
result_h[1] = __float22half2_rn(high_data[i]);
val_cast[data_id] = result_f;
}
}
#endif
}
template <typename T>
void launch_attn_softmax(T*, const T*, int, int, int, hipStream_t);
template <>
void launch_attn_softmax<float>(float* vals,
const float* attn_mask,
int batch_size,
int heads,
int sequence_length,
hipStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int block_compute_size =
(seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1);
dim3 grid_dim(batch_size, heads * sequence_length / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4))))
: 1);
dim3 grid_dim(batch_size, heads * sequence_length / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 512)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else
throw std::runtime_error(
"Unsupport Seq_Length! Check the restriction of the max_threads and "
"max_thread_iterations!");
}
}
template <>
void launch_attn_softmax<__half>(__half* vals,
const __half* attn_mask,
int batch_size,
int heads,
int sequence_length,
hipStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int block_compute_size =
(seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1);
dim3 grid_dim(batch_size, heads * sequence_length / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4))))
: 1);
dim3 grid_dim(batch_size, heads * sequence_length / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 512)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else
throw std::runtime_error(
"Unsupport Seq_Length! Check the restriction of the max_threads and "
"max_thread_iterations!");
}
}
template <typename T, int tbSize, int blockStride>
__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5; // warp-count = num_threads / WARP_SIZE (32)
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride)
? (seq_length + iteration_stride - 1) / iteration_stride
: MAX_THREAD_ITERATIONS);
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> 5;
int lane = id & 0x1f;
T val_reg[MAX_THREAD_ITERATIONS];
T soft_reg[MAX_THREAD_ITERATIONS];
float grad_reg = 0.0f;
#pragma unroll
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
val_reg[i] = out_grad[row * block_width + data_id];
soft_reg[i] = soft_inp[row * block_width + data_id];
grad_reg += ((float)val_reg[i] *
(float)soft_reg[i]); // if done in half, the multiplication, we may lose
// 2% of accuracy in computation!!
}
}
for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = grad_reg;
b.sync();
if (lane < warp_num) grad_reg = partialSum[lane];
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
grad_reg = g.shfl(grad_reg, id / tbSize);
}
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg);
out_grad[row * block_width + data_id] = (T)temp;
}
}
}
template <typename T, int ITERATIONS>
__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/,
const T* output,
int softmax_length)
{
int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
int offset = batch_idx * softmax_length + threadIdx.x;
grad += offset;
output += offset;
T grad_reg[ITERATIONS];
T output_reg[ITERATIONS];
float sum = 0.0;
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length) {
grad_reg[i] = grad[i * WARP_SIZE];
output_reg[i] = output[i * WARP_SIZE];
sum += (float)grad_reg[i] * (float)output_reg[i];
}
}
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length)
grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum);
}
}
template <typename T>
void launch_attn_softmax_backward_v2(T* out_grad,
const T* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream)
{
const int warps_per_block = 4;
dim3 grid_dim(batch_size * heads * seq_length / warps_per_block);
dim3 block_dim(WARP_SIZE, warps_per_block);
if (seq_length <= 32)
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 1>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
else if (seq_length <= 64)
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
else if (seq_length <= 128)
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
else if (seq_length <= 256)
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
else if (seq_length <= 384)
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 12>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
else if (seq_length <= 512)
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
else if (seq_length <= 768)
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 24>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
else if (seq_length <= 1024)
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
else if (seq_length <= 2048)
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
else
throw std::runtime_error(
std::string("Special sequence length found in softmax backward, seq_length: ") +
std::to_string(seq_length));
}
template void launch_attn_softmax_backward_v2<__half>(__half* out_grad,
const __half* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream);
template void launch_attn_softmax_backward_v2<float>(float* out_grad,
const float* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream);
| 542c0662e3de072635a61236b4d8c7824979a87c.cu | #include <math.h>
#include "custom_cuda_layers.h"
#include "general_kernels.h"
namespace cg = cooperative_groups;
// Fused attention + softmax
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(float* vals,
const float* attn_mask,
int heads,
int seq_length,
int iterations)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = std::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float4* val_cast = reinterpret_cast<float4*>(vals);
const float4* attn_mask_cast = reinterpret_cast<const float4*>(attn_mask);
float4 data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float4 mask = attn_mask_cast[mask_offset + data_id];
data[i] = val_cast[data_offset + data_id];
data[i].x += mask.x;
data[i].y += mask.y;
data[i].z += mask.z;
data[i].w += mask.w;
max_val = (data[i].x > max_val ? data[i].x : max_val);
max_val = (data[i].y > max_val ? data[i].y : max_val);
max_val = (data[i].z > max_val ? data[i].z : max_val);
max_val = (data[i].w > max_val ? data[i].w : max_val);
} else {
data[i].x = minus_infinity;
data[i].y = minus_infinity;
data[i].z = minus_infinity;
data[i].w = minus_infinity;
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride)
iters = warp_num / (iteration_stride / max_threads_in_sequence);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
data[i].x = __expf(data[i].x - max_val);
data[i].y = __expf(data[i].y - max_val);
data[i].z = __expf(data[i].z - max_val);
data[i].w = __expf(data[i].w - max_val);
sum += (data[i].x + data[i].y + data[i].z + data[i].w);
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride)
iters = warp_num / (iteration_stride / max_threads_in_sequence);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
data[i].x /= sum;
data[i].y /= sum;
data[i].z /= sum;
data[i].w /= sum;
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) val_cast[data_offset + data_id] = data[i];
}
}
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(__half* vals,
const __half* attn_mask,
int heads,
int seq_length,
int iterations)
{
#if __CUDA_ARCH__ >= 700
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = std::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float2* val_cast = reinterpret_cast<float2*>(vals);
const float2* attn_mask_cast = reinterpret_cast<const float2*>(attn_mask);
val_cast += data_offset;
attn_mask_cast += mask_offset;
float2 low_data[MAX_THREAD_ITERATIONS];
float2 high_data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 data = val_cast[data_id];
float2 mask = attn_mask_cast[data_id];
__half2* data_arr = reinterpret_cast<__half2*>(&data);
__half2* mask_arr = reinterpret_cast<__half2*>(&mask);
low_data[i] = __half22float2(data_arr[0]);
high_data[i] = __half22float2(data_arr[1]);
float2 low_mask = __half22float2(mask_arr[0]);
float2 high_mask = __half22float2(mask_arr[1]);
low_data[i].x += low_mask.x;
low_data[i].y += low_mask.y;
high_data[i].x += high_mask.x;
high_data[i].y += high_mask.y;
max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride)
iters = warp_num / (iteration_stride / max_threads_in_sequence);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
low_data[i].x = __expf(low_data[i].x - max_val);
low_data[i].y = __expf(low_data[i].y - max_val);
high_data[i].x = __expf(high_data[i].x - max_val);
high_data[i].y = __expf(high_data[i].y - max_val);
sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
}
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride)
iters = warp_num / (iteration_stride / max_threads_in_sequence);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 result_f;
__half2* result_h = reinterpret_cast<__half2*>(&result_f);
low_data[i].x /= sum;
low_data[i].y /= sum;
high_data[i].x /= sum;
high_data[i].y /= sum;
result_h[0] = __float22half2_rn(low_data[i]);
result_h[1] = __float22half2_rn(high_data[i]);
val_cast[data_id] = result_f;
}
}
#endif
}
template <typename T>
void launch_attn_softmax(T*, const T*, int, int, int, cudaStream_t);
template <>
void launch_attn_softmax<float>(float* vals,
const float* attn_mask,
int batch_size,
int heads,
int sequence_length,
cudaStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int block_compute_size =
(seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1);
dim3 grid_dim(batch_size, heads * sequence_length / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
attn_softmax<2, (threads / 2), 2>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
attn_softmax<4, (threads / 4), 4>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
attn_softmax<8, (threads / 8), 8>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
attn_softmax<16, (threads / 16), 16>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
attn_softmax<32, (threads / 32), 32>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
attn_softmax<32, (threads / 64), 64>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4))))
: 1);
dim3 grid_dim(batch_size, heads * sequence_length / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 512)
attn_softmax<32, (threads / 128), 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
attn_softmax<32, 1, 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else
throw std::runtime_error(
"Unsupport Seq_Length! Check the restriction of the max_threads and "
"max_thread_iterations!");
}
}
template <>
void launch_attn_softmax<__half>(__half* vals,
const __half* attn_mask,
int batch_size,
int heads,
int sequence_length,
cudaStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int block_compute_size =
(seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1);
dim3 grid_dim(batch_size, heads * sequence_length / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
attn_softmax<2, (threads / 2), 2>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
attn_softmax<4, (threads / 4), 4>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
attn_softmax<8, (threads / 8), 8>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
attn_softmax<16, (threads / 16), 16>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
attn_softmax<32, (threads / 32), 32>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
attn_softmax<32, (threads / 64), 64>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4))))
: 1);
dim3 grid_dim(batch_size, heads * sequence_length / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 512)
attn_softmax<32, (threads / 128), 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
attn_softmax<32, 1, 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else
throw std::runtime_error(
"Unsupport Seq_Length! Check the restriction of the max_threads and "
"max_thread_iterations!");
}
}
template <typename T, int tbSize, int blockStride>
__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5; // warp-count = num_threads / WARP_SIZE (32)
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride)
? (seq_length + iteration_stride - 1) / iteration_stride
: MAX_THREAD_ITERATIONS);
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> 5;
int lane = id & 0x1f;
T val_reg[MAX_THREAD_ITERATIONS];
T soft_reg[MAX_THREAD_ITERATIONS];
float grad_reg = 0.0f;
#pragma unroll
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
val_reg[i] = out_grad[row * block_width + data_id];
soft_reg[i] = soft_inp[row * block_width + data_id];
grad_reg += ((float)val_reg[i] *
(float)soft_reg[i]); // if done in half, the multiplication, we may lose
// 2% of accuracy in computation!!
}
}
for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = grad_reg;
b.sync();
if (lane < warp_num) grad_reg = partialSum[lane];
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
grad_reg = g.shfl(grad_reg, id / tbSize);
}
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg);
out_grad[row * block_width + data_id] = (T)temp;
}
}
}
template <typename T, int ITERATIONS>
__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/,
const T* output,
int softmax_length)
{
int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
int offset = batch_idx * softmax_length + threadIdx.x;
grad += offset;
output += offset;
T grad_reg[ITERATIONS];
T output_reg[ITERATIONS];
float sum = 0.0;
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length) {
grad_reg[i] = grad[i * WARP_SIZE];
output_reg[i] = output[i * WARP_SIZE];
sum += (float)grad_reg[i] * (float)output_reg[i];
}
}
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length)
grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum);
}
}
template <typename T>
void launch_attn_softmax_backward_v2(T* out_grad,
const T* soft_inp,
int batch_size,
int heads,
int seq_length,
cudaStream_t stream)
{
const int warps_per_block = 4;
dim3 grid_dim(batch_size * heads * seq_length / warps_per_block);
dim3 block_dim(WARP_SIZE, warps_per_block);
if (seq_length <= 32)
softmax_backward_kernel_v2<T, 1>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
else if (seq_length <= 64)
softmax_backward_kernel_v2<T, 2>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
else if (seq_length <= 128)
softmax_backward_kernel_v2<T, 4>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
else if (seq_length <= 256)
softmax_backward_kernel_v2<T, 8>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
else if (seq_length <= 384)
softmax_backward_kernel_v2<T, 12>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
else if (seq_length <= 512)
softmax_backward_kernel_v2<T, 16>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
else if (seq_length <= 768)
softmax_backward_kernel_v2<T, 24>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
else if (seq_length <= 1024)
softmax_backward_kernel_v2<T, 32>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
else if (seq_length <= 2048)
softmax_backward_kernel_v2<T, 64>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
else
throw std::runtime_error(
std::string("Special sequence length found in softmax backward, seq_length: ") +
std::to_string(seq_length));
}
template void launch_attn_softmax_backward_v2<__half>(__half* out_grad,
const __half* soft_inp,
int batch_size,
int heads,
int seq_length,
cudaStream_t stream);
template void launch_attn_softmax_backward_v2<float>(float* out_grad,
const float* soft_inp,
int batch_size,
int heads,
int seq_length,
cudaStream_t stream);
|
9a643f006b23d6159b46e83169d72d941b1835e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "Indice1D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // pas necessaire, just for fun
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
const int NB_THREAD=Indice2D::nbThread();
const int TID=Indice2D::tid();
// Debug, facultatif
if (TID==0)
{
printf("Coucou from device tid = %d", TID); //required Device::synchronize(); after the call of kernel
}
int s = TID;
while (s<n)
{
ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s];
s = s + NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 9a643f006b23d6159b46e83169d72d941b1835e9.cu | #include "Indice2D.h"
#include "Indice1D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // pas necessaire, just for fun
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
const int NB_THREAD=Indice2D::nbThread();
const int TID=Indice2D::tid();
// Debug, facultatif
if (TID==0)
{
printf("Coucou from device tid = %d", TID); //required Device::synchronize(); after the call of kernel
}
int s = TID;
while (s<n)
{
ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s];
s = s + NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
c3fd52386336b72a9e312d0ebe7be7b3e1a85631.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
__global__ void multipleStrings(char* a , char* b,int size)
{
int i = threadIdx.x * size;
int j = 0;
for(j=0;j<size;j++)
{
b[i+j] = a[j];
}
}
int main()
{
hipError_t error;
int n;
int size;
printf("Enter the value of n \n");
scanf("%d",&n);
printf("Enter the size of the string \n");
scanf("%d",&size);
char *a = (char*)malloc(sizeof(char)*(size+1));
printf("Enter the string \n");
scanf("%s",a);
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
char *b = (char*)malloc(sizeof(char)*(n*size+1));
char *d_a , *d_b;
int size1 = sizeof(char)*(size+1);
int size2 = sizeof(char)*(size*n+1);
hipMalloc((void**)&d_a,size1);
hipMalloc((void**)&d_b,size2);
error = hipMemcpy(d_a,a,sizeof(char)*(size+1),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( multipleStrings), dim3(1),dim3(n), 0, 0, d_a,d_b,size);
hipMemcpy(b,d_b,size2,hipMemcpyDeviceToHost);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,start,stop);
int l = strlen(b);
printf("string = %s \n",b);
printf("Time taken = %f \n",elapsedTime);
hipFree(d_a);
hipFree(d_b);
}
| c3fd52386336b72a9e312d0ebe7be7b3e1a85631.cu | #include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
__global__ void multipleStrings(char* a , char* b,int size)
{
int i = threadIdx.x * size;
int j = 0;
for(j=0;j<size;j++)
{
b[i+j] = a[j];
}
}
int main()
{
cudaError_t error;
int n;
int size;
printf("Enter the value of n \n");
scanf("%d",&n);
printf("Enter the size of the string \n");
scanf("%d",&size);
char *a = (char*)malloc(sizeof(char)*(size+1));
printf("Enter the string \n");
scanf("%s",a);
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
char *b = (char*)malloc(sizeof(char)*(n*size+1));
char *d_a , *d_b;
int size1 = sizeof(char)*(size+1);
int size2 = sizeof(char)*(size*n+1);
cudaMalloc((void**)&d_a,size1);
cudaMalloc((void**)&d_b,size2);
error = cudaMemcpy(d_a,a,sizeof(char)*(size+1),cudaMemcpyHostToDevice);
multipleStrings<<<1,n>>>(d_a,d_b,size);
cudaMemcpy(b,d_b,size2,cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
int l = strlen(b);
printf("string = %s \n",b);
printf("Time taken = %f \n",elapsedTime);
cudaFree(d_a);
cudaFree(d_b);
}
|
1b79553b14b99fd8f3d2f2102999b2386c49b8e9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrix_multiplication.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *matrix_1 = NULL;
hipMalloc(&matrix_1, XSIZE*YSIZE);
int *matrix_2 = NULL;
hipMalloc(&matrix_2, XSIZE*YSIZE);
int *matrix_r = NULL;
hipMalloc(&matrix_r, XSIZE*YSIZE);
int m = 2;
int n = XSIZE*YSIZE;
int p = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrix_multiplication), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix_1,matrix_2,matrix_r,m,n,p);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrix_multiplication), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix_1,matrix_2,matrix_r,m,n,p);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrix_multiplication), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix_1,matrix_2,matrix_r,m,n,p);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1b79553b14b99fd8f3d2f2102999b2386c49b8e9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrix_multiplication.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *matrix_1 = NULL;
cudaMalloc(&matrix_1, XSIZE*YSIZE);
int *matrix_2 = NULL;
cudaMalloc(&matrix_2, XSIZE*YSIZE);
int *matrix_r = NULL;
cudaMalloc(&matrix_r, XSIZE*YSIZE);
int m = 2;
int n = XSIZE*YSIZE;
int p = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrix_multiplication<<<gridBlock,threadBlock>>>(matrix_1,matrix_2,matrix_r,m,n,p);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrix_multiplication<<<gridBlock,threadBlock>>>(matrix_1,matrix_2,matrix_r,m,n,p);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrix_multiplication<<<gridBlock,threadBlock>>>(matrix_1,matrix_2,matrix_r,m,n,p);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cfd8b1a6ef2bdb46f0595ea97579107afe364ddb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void shmem_max_reduce_kernel(float * d_max, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t,
// shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
// __syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] = max(sdata[tid],sdata[tid+s]);
/*sdata[tid] += sdata[tid + s];*/
}
__syncthreads(); // make sure all adds at one stage are
//done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_max[blockIdx.x] = sdata[0];
}
} | cfd8b1a6ef2bdb46f0595ea97579107afe364ddb.cu | #include "includes.h"
__global__ void shmem_max_reduce_kernel(float * d_max, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t,
// shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
// __syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] = max(sdata[tid],sdata[tid+s]);
/*sdata[tid] += sdata[tid + s];*/
}
__syncthreads(); // make sure all adds at one stage are
//done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_max[blockIdx.x] = sdata[0];
}
} |
34aa695ea2f4e44d9c19f56fa39facf267775102.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* classifier_blk.cu
*
* A CUDA kernel for accelerating a fully-connected neural network layer.
*
* This kernel treats the inputs, weights, and outputs of the layer as
* block matrices in order to facilitate better data reuse.
*/
#include <iostream>
#include <string>
using namespace std;
#ifndef Ni
#define Ni 4096
#endif
#ifndef Nn
#define Nn 1024
#endif
#ifndef Nb
#define Nb 1
#endif
#define DEBUG (false)
/* The weights of the layer*/
__device__ float d_weights[Nn][Ni];
__device__ float d_inputs[Ni][Nb];
__device__ float d_outputs[Nn][Nb];
#define BLK_SZ (16)
/* Performs a matrix multiply. Assumes kernel block size is (BLK_SZ, BLK_SZ, 1)*/
__device__ void matmul_blk(float a[BLK_SZ][BLK_SZ], float b[BLK_SZ][BLK_SZ], float c[BLK_SZ][BLK_SZ])
{
const int row = threadIdx.x;
const int col = threadIdx.y;
for(int i = 0; i < BLK_SZ; i++)
{
c[row][col] += a[row][i] * b[i][col];
}
}
/* Performs C = A + B. Assumes kernel block size is (BLK_SZ, BLK_SZ, 1)*/
__device__ void matadd_blk(float a[BLK_SZ][BLK_SZ], float b[BLK_SZ][BLK_SZ], float c[BLK_SZ][BLK_SZ])
{
const int row = threadIdx.x;
const int col = threadIdx.y;
c[row][col] = a[row][col] + b[row][col];
}
/* Atomically adds the matrix mat into dst. dst should be a pointer to a block matrix whose elements are square matrices of size BLK_SZ*/
__device__ void matadd_blk_global(float *dst, int2 dstDim, float mat[BLK_SZ][BLK_SZ], int blkX, int blkY)
{
const int row = threadIdx.x;
const int col = threadIdx.y;
atomicAdd(&dst[dstDim.y*(BLK_SZ*blkX + row) + (BLK_SZ*blkY + col)], mat[row][col]);
}
/* Reads the matrix mat from src. src should be a pointer to a block matrix whose elements are square matrices of size BLK_SZ*/
__device__ void matread_blk(float *src, int2 srcDim, float mat[BLK_SZ][BLK_SZ], int blkX, int blkY)
{
const int row = threadIdx.x;
const int col = threadIdx.y;
mat[row][col] = src[srcDim.y*(BLK_SZ*blkX + row) + (BLK_SZ*blkY + col)];
}
/* Writes the matrix mat into dst. dst should be a pointer to a block matrix whose elements are square matrices of size BLK_SZ*/
__device__ void matwrite_blk(float *dst, int2 srcDim, float mat[BLK_SZ][BLK_SZ], int blkX, int blkY)
{
const int row = threadIdx.x;
const int col = threadIdx.y;
dst[srcDim.y*(BLK_SZ*blkX + row) + (BLK_SZ*blkY + col)] = mat[row][col];
}
/* Sets all elements of mat to 0 */
__device__ void blk_zero(float mat[BLK_SZ][BLK_SZ])
{
const int row = threadIdx.x;
const int col = threadIdx.y;
mat[row][col] = 0;
}
__device__ void printBlock(float mat[BLK_SZ][BLK_SZ], int blkX, int blkY)
{
if(blockIdx.x == blkX && blockIdx.y == blkY)
{
printf("mat[%d][%d] = %f\n", threadIdx.x, threadIdx.y, mat[threadIdx.x][threadIdx.y]);
}
}
__global__ void classify()
{
const int2 weightDim = make_int2(Nn, Ni);
const int2 inDim = make_int2(Ni, Nb);
const int2 outDim = make_int2(Nn, Nb);
const int blkX = blockIdx.x;
const int blkY = blockIdx.y;
__shared__ float weightBlk[BLK_SZ][BLK_SZ];
__shared__ float inBlk[BLK_SZ][BLK_SZ];
__shared__ float outBlk[BLK_SZ][BLK_SZ];
matread_blk((float*)d_inputs, inDim, inBlk, blkX, blkY);
for(int i = 0; i < (weightDim.x / BLK_SZ); i++)
{
matread_blk((float*)d_weights, weightDim, weightBlk, i, blkX);
blk_zero(outBlk);
__threadfence();
matmul_blk(weightBlk, inBlk, outBlk);
matadd_blk_global((float*)d_outputs, outDim, outBlk, i, blkY);
}
}
void randomizeArray(float *data, int len)
{
for(int i = 0; i < len; i++)
{
data[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 16.0f - 8.0f;
}
}
int main(int argc, char **argv)
{
const int2 in_dim = make_int2(Ni, Nb); // The dimensions of the input matrix
const int in_size = sizeof(float) * in_dim.x * in_dim.y; // Total size of input buffer in bytes
const int2 out_dim = make_int2(Nn, Nb); // The dimensions of the output matrix
const int out_size = sizeof(float) * out_dim.x * out_dim.y; // Total size of output buffer in bytes
const dim3 grid_size(in_dim.x/16,in_dim.y/16,1);
const dim3 block_size(16,16,1);
float *h_in_data = new float[in_dim.x * in_dim.y]; // Input data on host
float *h_out_data = new float[out_dim.x * out_dim.y]; // Output on device
float *h_random_weights = new float[Ni*Nn];
// Make some random data.
randomizeArray(h_random_weights, Ni*Nn);
randomizeArray(h_in_data, in_dim.x * in_dim.y);
hipMemcpyToSymbol(d_weights, h_random_weights, Nn*Ni*sizeof(float));
hipMemcpyToSymbol(d_inputs, h_in_data, in_size); // Give the GPU our input data.
hipLaunchKernelGGL(( classify), dim3(grid_size), dim3(block_size), 0, 0, );
hipDeviceSynchronize();
hipMemcpyFromSymbol(h_out_data, d_outputs, out_size); // Retrieve the neuron outputs.
if(DEBUG)
{
for(int i = 0; i < Nn; i++)
{
for(int j = 0; j < Ni; j++)
{
printf("%f ", h_random_weights[i*Ni + j]);
}
printf("\n");
}
printf("\n");
for(int i = 0; i < in_dim.x; i++)
{
for(int j = 0; j < in_dim.y; j++)
{
printf("%f ", h_in_data[i*in_dim.y + j]);
}
printf("\n");
}
printf("\n");
for(int i = 0; i < out_dim.x; i++)
{
for(int j = 0; j < out_dim.y; j++)
{
printf("%f ", h_out_data[i*out_dim.y + j]);
}
printf("\n");
}
}
return 0;
}
| 34aa695ea2f4e44d9c19f56fa39facf267775102.cu | /**
* classifier_blk.cu
*
* A CUDA kernel for accelerating a fully-connected neural network layer.
*
* This kernel treats the inputs, weights, and outputs of the layer as
* block matrices in order to facilitate better data reuse.
*/
#include <iostream>
#include <string>
using namespace std;
#ifndef Ni
#define Ni 4096
#endif
#ifndef Nn
#define Nn 1024
#endif
#ifndef Nb
#define Nb 1
#endif
#define DEBUG (false)
/* The weights of the layer*/
__device__ float d_weights[Nn][Ni];
__device__ float d_inputs[Ni][Nb];
__device__ float d_outputs[Nn][Nb];
#define BLK_SZ (16)
/* Performs a matrix multiply. Assumes kernel block size is (BLK_SZ, BLK_SZ, 1)*/
__device__ void matmul_blk(float a[BLK_SZ][BLK_SZ], float b[BLK_SZ][BLK_SZ], float c[BLK_SZ][BLK_SZ])
{
const int row = threadIdx.x;
const int col = threadIdx.y;
for(int i = 0; i < BLK_SZ; i++)
{
c[row][col] += a[row][i] * b[i][col];
}
}
/* Performs C = A + B. Assumes kernel block size is (BLK_SZ, BLK_SZ, 1)*/
__device__ void matadd_blk(float a[BLK_SZ][BLK_SZ], float b[BLK_SZ][BLK_SZ], float c[BLK_SZ][BLK_SZ])
{
const int row = threadIdx.x;
const int col = threadIdx.y;
c[row][col] = a[row][col] + b[row][col];
}
/* Atomically adds the matrix mat into dst. dst should be a pointer to a block matrix whose elements are square matrices of size BLK_SZ*/
__device__ void matadd_blk_global(float *dst, int2 dstDim, float mat[BLK_SZ][BLK_SZ], int blkX, int blkY)
{
const int row = threadIdx.x;
const int col = threadIdx.y;
atomicAdd(&dst[dstDim.y*(BLK_SZ*blkX + row) + (BLK_SZ*blkY + col)], mat[row][col]);
}
/* Reads the matrix mat from src. src should be a pointer to a block matrix whose elements are square matrices of size BLK_SZ*/
__device__ void matread_blk(float *src, int2 srcDim, float mat[BLK_SZ][BLK_SZ], int blkX, int blkY)
{
const int row = threadIdx.x;
const int col = threadIdx.y;
mat[row][col] = src[srcDim.y*(BLK_SZ*blkX + row) + (BLK_SZ*blkY + col)];
}
/* Writes the matrix mat into dst. dst should be a pointer to a block matrix whose elements are square matrices of size BLK_SZ*/
__device__ void matwrite_blk(float *dst, int2 srcDim, float mat[BLK_SZ][BLK_SZ], int blkX, int blkY)
{
const int row = threadIdx.x;
const int col = threadIdx.y;
dst[srcDim.y*(BLK_SZ*blkX + row) + (BLK_SZ*blkY + col)] = mat[row][col];
}
/* Sets all elements of mat to 0 */
__device__ void blk_zero(float mat[BLK_SZ][BLK_SZ])
{
const int row = threadIdx.x;
const int col = threadIdx.y;
mat[row][col] = 0;
}
__device__ void printBlock(float mat[BLK_SZ][BLK_SZ], int blkX, int blkY)
{
if(blockIdx.x == blkX && blockIdx.y == blkY)
{
printf("mat[%d][%d] = %f\n", threadIdx.x, threadIdx.y, mat[threadIdx.x][threadIdx.y]);
}
}
/* Tiled layer evaluation: accumulates d_outputs += d_weights * d_inputs using
 * BLK_SZ x BLK_SZ shared-memory tiles. Launch with (BLK_SZ, BLK_SZ, 1) thread
 * blocks; blockIdx.x selects the input row tile, blockIdx.y the batch column
 * tile. Assumes the dimensions Nn, Ni, Nb are multiples of BLK_SZ.
 *
 * Fix: the original used __threadfence() here, which is only a memory
 * fence, not a barrier -- it does not make the tile writes of other threads
 * safe to read in matmul_blk. A block-wide __syncthreads() is required
 * between writing a shared tile and reading elements written by other
 * threads, and again before the next iteration overwrites weightBlk. */
__global__ void classify()
{
    const int2 weightDim = make_int2(Nn, Ni);
    const int2 inDim = make_int2(Ni, Nb);
    const int2 outDim = make_int2(Nn, Nb);
    const int blkX = blockIdx.x;
    const int blkY = blockIdx.y;
    __shared__ float weightBlk[BLK_SZ][BLK_SZ];
    __shared__ float inBlk[BLK_SZ][BLK_SZ];
    __shared__ float outBlk[BLK_SZ][BLK_SZ];
    // The input tile for this block is reused across all weight tiles.
    matread_blk((float*)d_inputs, inDim, inBlk, blkX, blkY);
    for(int i = 0; i < (weightDim.x / BLK_SZ); i++)
    {
        matread_blk((float*)d_weights, weightDim, weightBlk, i, blkX);
        blk_zero(outBlk);
        // Barrier: weightBlk/inBlk must be fully written before matmul_blk
        // reads elements that other threads loaded.
        __syncthreads();
        matmul_blk(weightBlk, inBlk, outBlk);
        // Each thread flushes only its own outBlk element, so no barrier is
        // needed before this accumulate.
        matadd_blk_global((float*)d_outputs, outDim, outBlk, i, blkY);
        // Barrier: all reads of weightBlk must finish before the next
        // iteration overwrites it.
        __syncthreads();
    }
}
/* Fills data[0..len) with pseudo-random floats uniformly drawn from
 * [-8, 8], using the C library rand() stream (seed with srand() for
 * reproducibility). A non-positive len is a no-op. */
void randomizeArray(float *data, int len)
{
    int i = 0;
    while (i < len) {
        data[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX) * 16.0f - 8.0f;
        ++i;
    }
}
/* Driver: generates random weights (Nn x Ni) and inputs (Ni x Nb), uploads
 * them to the device symbols, runs the tiled classify kernel and fetches the
 * Nn x Nb output. Returns 0 on success, 1 on a CUDA failure.
 * Fixes vs. original: launch geometry derived from BLK_SZ instead of a
 * literal 16 (keeps it consistent with the kernels' tile size), kernel errors
 * are surfaced, and the three heap arrays are freed. */
int main(int argc, char **argv)
{
    const int2 in_dim = make_int2(Ni, Nb);                      // input matrix dimensions
    const int in_size = sizeof(float) * in_dim.x * in_dim.y;    // input buffer bytes
    const int2 out_dim = make_int2(Nn, Nb);                     // output matrix dimensions
    const int out_size = sizeof(float) * out_dim.x * out_dim.y; // output buffer bytes
    const dim3 grid_size(in_dim.x / BLK_SZ, in_dim.y / BLK_SZ, 1);
    const dim3 block_size(BLK_SZ, BLK_SZ, 1);
    float *h_in_data = new float[in_dim.x * in_dim.y];   // input data on host
    float *h_out_data = new float[out_dim.x * out_dim.y]; // output fetched from device
    float *h_random_weights = new float[Ni * Nn];
    // Make some random data.
    randomizeArray(h_random_weights, Ni * Nn);
    randomizeArray(h_in_data, in_dim.x * in_dim.y);
    cudaMemcpyToSymbol(d_weights, h_random_weights, Nn * Ni * sizeof(float));
    cudaMemcpyToSymbol(d_inputs, h_in_data, in_size); // give the GPU our input data
    classify<<<grid_size, block_size>>>();
    // Surface both launch-configuration errors and asynchronous execution
    // errors before trusting the output buffer.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) {
        err = cudaDeviceSynchronize();
    }
    if (err != cudaSuccess) {
        printf("classify failed: %s\n", cudaGetErrorString(err));
        delete[] h_in_data;
        delete[] h_out_data;
        delete[] h_random_weights;
        return 1;
    }
    cudaMemcpyFromSymbol(h_out_data, d_outputs, out_size); // retrieve the neuron outputs
    if (DEBUG)
    {
        for (int i = 0; i < Nn; i++) {
            for (int j = 0; j < Ni; j++) {
                printf("%f ", h_random_weights[i*Ni + j]);
            }
            printf("\n");
        }
        printf("\n");
        for (int i = 0; i < in_dim.x; i++) {
            for (int j = 0; j < in_dim.y; j++) {
                printf("%f ", h_in_data[i*in_dim.y + j]);
            }
            printf("\n");
        }
        printf("\n");
        for (int i = 0; i < out_dim.x; i++) {
            for (int j = 0; j < out_dim.y; j++) {
                printf("%f ", h_out_data[i*out_dim.y + j]);
            }
            printf("\n");
        }
    }
    // Fix: the original leaked all three heap arrays.
    delete[] h_in_data;
    delete[] h_out_data;
    delete[] h_random_weights;
    return 0;
}
|
ed94beb25f344fa7d062cc3ae0954ee3fbdb2d08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "config.h"
#include "queue.h"
#ifdef _LOCAL_HEAP_QUEUE
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include "util.cuh"
#include "assert.h"
#include "local_heap_queue.cuh"
DECLARE_GPU_TIMER(heapify);
DECLARE_GPU_TIMER(heapify_down);
// base queue
__device__ queue_item *fel;
__device__ int dequeue_count;
__device__ int enqueue_count;
__device__ int item_count[NUM_LPS];
__device__ int insert_count[NUM_LPS];
__device__ bool root_modified[NUM_LPS]; // root modified --> heapify_down
__device__ long global_min;
__device__ long lp_min_ts[NUM_LPS];
// CUDA block size
static int num_threads_lps = min(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS);
static int num_blocks_lps = (NUM_LPS + num_threads_lps - 1) / num_threads_lps;
__device__ static int d_num_threads_lps = MIN(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS);
__device__ static int d_num_blocks_lps = (NUM_LPS + MIN(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS) - 1) / MIN(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS);
// -----------------------------
// Helper functions
// -----------------------------
static inline __device__ int get_left_child_index(int node)
{
return 2 * node + 1;
}
static inline __device__ int get_right_child_index(int node)
{
return 2 * node + 2;
}
static inline __device__ void swap(int lp, int n1, int n2)
{
queue_item temp = fel[lp * FEL_SIZE + n1];
fel[lp * FEL_SIZE + n1] = fel[lp * FEL_SIZE + n2];
fel[lp * FEL_SIZE + n2] = temp;
}
static __global__ void reset_enqueue_count()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
enqueue_count = 0;
}
}
static __global__ void reset_dequeue_count()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
dequeue_count = 0;
}
}
// -----------------------------
// Helper kernels
// -----------------------------
#ifdef _PHOLD
static __global__ void find_min_ts_device_pre()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
if (item_count[idx] > 0) {
lp_min_ts[idx] = fel[idx * FEL_SIZE].ts;
} else {
lp_min_ts[idx] = LONG_MAX;
}
}
}
#endif
#ifdef _PHOLD
__device__ void *temp_storage = NULL;
__device__ size_t temp_storage_bytes = 0;
static __global__ void find_min_ts_device()
{
if(!temp_storage)
{
hipcub::DeviceReduce::Min(temp_storage, temp_storage_bytes, lp_min_ts,
&global_min, NUM_LPS);
CudaSafeCall( hipMalloc(&temp_storage, temp_storage_bytes) );
}
hipcub::DeviceReduce::Min(temp_storage, temp_storage_bytes, lp_min_ts,
&global_min, NUM_LPS);
}
#endif
// -----------------------------
// Main kernels
// -----------------------------
__device__ void copy_last_(int idx)
{
if (item_count[idx] > 0 && root_modified[idx]) {
// assert(item_count[idx] > 0);
int last_index = item_count[idx] - 1;
swap(idx, 0, last_index);
item_count[idx]--;
}
}
/* Grid-stride over the LPs: for each LP whose root event was consumed this
 * step (root_modified set), move the logically-last element of its FEL
 * region -- which may be a freshly appended, not-yet-heapified item -- into
 * the root slot and shrink the matching counter. heapify_down() must run
 * afterwards to restore the heap property.
 * NOTE(review): this swaps rather than copies, so the consumed root lands in
 * the old last slot; presumably harmless since that slot is now past the
 * live region -- confirm nothing reads it before the next insert. */
__global__ void copy_last()
{
    for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
         idx < NUM_LPS;
         idx += blockDim.x * gridDim.x) {
        if (item_count[idx] > 0 && root_modified[idx]) {
            // Last live slot counts both heapified items and pending inserts.
            int last_index = item_count[idx] + insert_count[idx] - 1;
            if(insert_count[idx] > 0)
                insert_count[idx]--;
            else
                item_count[idx]--;
            swap(idx, 0, last_index);
        }
    }
}
static __device__ bool heapify_node(int idx, int node)
{
int left_child = get_left_child_index(node);
int right_child = get_right_child_index(node);
int min = node;
if (left_child < item_count[idx]
&& fel[idx * FEL_SIZE + min] > fel[idx * FEL_SIZE + left_child]) {
min = left_child;
}
if (right_child < item_count[idx]
&& fel[idx * FEL_SIZE + min] > fel[idx * FEL_SIZE + right_child]) {
min = right_child;
}
if (min == node) {
if (VERBOSE_DEBUG) {
printf(" [heapify_node][LP %d] heap property restored at node %d\n", idx, node);
}
return false;
}
if (VERBOSE_DEBUG) {
#ifdef _PHOLD
printf(" [heapify_node][LP %d] swapping node %d (=%ld) with smallest child %d (=%ld)\n",
idx, node, fel[idx * FEL_SIZE + node].ts, min, fel[idx * FEL_SIZE + min].ts);
#endif
}
swap(idx, node, min);
return true;
}
static __device__ void heapify_node_down(int idx, int node)
{
while (true) {
int left_child = get_left_child_index(node);
int right_child = get_right_child_index(node);
int min = node;
if (left_child < item_count[idx]
&& fel[idx * FEL_SIZE + min] > fel[idx * FEL_SIZE + left_child]) {
min = left_child;
}
if (right_child < item_count[idx]
&& fel[idx * FEL_SIZE + min] > fel[idx * FEL_SIZE + right_child]) {
min = right_child;
}
if (min == node) {
if (VERBOSE_DEBUG) {
printf(" [heapify_node][LP %d] heap property restored at node %d\n", idx, node);
}
break;
}
if (VERBOSE_DEBUG) {
#ifdef _PHOLD
printf(" [heapify_node][LP %d] swapping node %d (=%ld) with smallest child %d (=%ld)\n",
idx, node, fel[idx * FEL_SIZE + node].ts, min, fel[idx * FEL_SIZE + min].ts);
#endif
}
swap(idx, node, min);
node = min;
}
}
static __global__ void heapify_up()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
int new_count = insert_count[idx];
for(int new_nodes = 0; new_nodes < new_count; new_nodes++)
{
item_count[idx]++;
insert_count[idx]--;
for (int node = (item_count[idx] - 2) / 2;; node = (node - 1) / 2) {
if(!heapify_node(idx, node) || node == 0)
break;
}
}
}
}
static __device__ void heapify_down_(int idx)
{
if (root_modified[idx]) {
root_modified[idx] = false;
heapify_node_down(idx, 0);
}
}
/* Grid-stride over the LPs: for each LP whose root was replaced by
 * copy_last() (root_modified set), sift the new root down to restore the
 * min-heap property of that LP's sub-FEL.
 * Fix: removed the dead `int idx = 0;` declaration that was immediately
 * shadowed by the loop variable. */
static __global__ void heapify_down()
{
    for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
         idx < NUM_LPS;
         idx += blockDim.x * gridDim.x) {
        if (root_modified[idx]) {
            root_modified[idx] = false;
            heapify_node_down(idx, 0);
        }
    }
}
__global__ void local_heap_queue_print(int lp)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
printf("|");
for (int i = 0; i < FEL_SIZE; ++i) {
int fel_idx = lp * FEL_SIZE + i;
if (i < item_count[lp]) {
#ifdef _PHOLD
printf(" %ld |", fel[fel_idx].ts);
#endif
} else {
printf(" |");
}
}
printf("\n");
}
}
// -----------------------------
// Queue interface
// -----------------------------
void local_heap_queue_init()
{
queue_item *h_fel;
CudaSafeCall( hipMalloc(&h_fel, ITEM_BYTES * FEL_SIZE * NUM_NODES) );
CudaSafeCall( hipMemcpyToSymbol(fel, &h_fel, sizeof(fel)) );
printf("\n\n-----------------------------------\n");
printf("[ LHQ ] Memory consumption\n");
printf("-----------------------------------\n");
printf(" available: %.2f MB\n", (float) DEVICE_MEMORY_MB);
printf(" int arrays: %.2f KB\n", MEM_INT_ARRAYS / 1000.0);
printf(" long arrays: %.2f KB\n", MEM_LONG_ARRAYS / 1000.0);
printf(" fel: %.2f MB (%d items per FEL)\n",
NUM_NODES * FEL_SIZE * sizeof(queue_item) / 1000000.0, FEL_SIZE);
printf("-----------------------------------\n\n");
}
void local_heap_queue_finish()
{
}
int local_heap_queue_get_enqueue_count()
{
int c = -1;
CudaSafeCall( hipMemcpyFromSymbol(&c, enqueue_count, sizeof(c)) );
return c;
}
int local_heap_queue_get_dequeue_count()
{
int c = -1;
CudaSafeCall( hipMemcpyFromSymbol(&c, dequeue_count, sizeof(c)) );
return c;
}
void local_heap_queue_check_phold()
{
}
#ifdef _PHOLD
long local_heap_queue_get_min_ts()
{
hipLaunchKernelGGL(( find_min_ts_device_pre), dim3(num_blocks_lps), dim3(num_threads_lps), 0, 0, );
CudaCheckError();
hipLaunchKernelGGL(( find_min_ts_device), dim3(1), dim3(1), 0, 0, );
CudaCheckError();
long min;
CudaSafeCall( hipMemcpyFromSymbol(&min, global_min, sizeof(long)) );
return min;
}
#endif
void local_heap_queue_pre()
{
}
__global__ void update_item_count()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
item_count[idx] += insert_count[idx];
insert_count[idx] = 0;
}
}
void local_heap_queue_post()
{
#ifdef _CORRECTNESS_CHECKS
hipLaunchKernelGGL(( reset_enqueue_count), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( reset_dequeue_count), dim3(1), dim3(1), 0, 0, );
#endif
hipLaunchKernelGGL(( copy_last), dim3(num_blocks_lps), dim3(num_threads_lps), 0, 0, );
CudaCheckError();
hipLaunchKernelGGL(( heapify_down), dim3(num_blocks_lps), dim3(num_threads_lps), 0, 0, );
CudaCheckError();
hipLaunchKernelGGL(( heapify_up), dim3(num_blocks_lps), dim3(num_threads_lps), 0, 0, );
CudaCheckError();
}
void local_heap_queue_post_init()
{
#ifdef _CORRECTNESS_CHECKS
hipLaunchKernelGGL(( reset_enqueue_count), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( reset_dequeue_count), dim3(1), dim3(1), 0, 0, );
#endif
local_heap_queue_post();
}
/* Appends an event to its LP's sub-FEL without restoring the heap property;
 * pending items are tracked in insert_count and folded into the heap later
 * (copy_last/heapify passes). Safe for concurrent producers: the append slot
 * is reserved with atomicAdd. Always returns true -- there is no capacity
 * check against FEL_SIZE (NOTE(review): caller must guarantee space). */
__device__ bool queue_insert(queue_item item)
{
    int lp = get_lp(item.node);
    // Reserve a unique slot immediately after the heapified region.
    int insert_pos = item_count[lp] + atomicAdd(&insert_count[lp], 1);
    int index = lp * FEL_SIZE + insert_pos;
    if (VERBOSE_DEBUG) {
#ifdef _PHOLD
        printf("inserting item with ts %ld at insert pos %d, index %d\n", item.ts,
               insert_pos, index);
#endif
    }
    fel[index] = item;
#ifdef _CORRECTNESS_CHECKS
    atomicAdd(&enqueue_count, 1);
#endif
    return true;
}
__device__ int local_heap_queue_remove(int lp)
{
int index = lp * FEL_SIZE;
queue_set_done(index);
item_count[lp]--;
return index;
}
/* Returns the FEL index of lp's minimum event and stores a pointer to it in
 * *item, or returns -1 if the LP has no events (or, under PHOLD, its minimum
 * timestamp lies outside the current lookahead window). Does not remove the
 * item; pair with queue_set_done(). */
__device__ int queue_peek(queue_item **item, int lp)
{
    int index = lp * FEL_SIZE;  // root of lp's sub-heap = its minimum
    if (item_count[lp] > 0
#ifdef _PHOLD
        && fel[index].ts < global_min + LOOKAHEAD
#endif
        ) {
        *item = &(fel[index]);
        return index;
    }
    else {
        return -1;
    }
}
/* index should only be a multiple of FEL_SIZE (as returned by d_peek) and as thus only the root node of
* each sub-FEL. Therefore, we can set root_modified without checking this. */
__device__ void local_heap_queue_set_done(int index)
{
int lp = index / FEL_SIZE;
root_modified[lp] = true;
#ifdef _A_STAR
copy_last_(lp);
heapify_down_(lp);
// heapify_(lp);
#endif
#ifdef _CORRECTNESS_CHECKS
atomicAdd(&dequeue_count, 1);
#endif
}
__device__ bool queue_is_empty(int lp)
{
return item_count[lp] + insert_count[lp] == 0;
}
__device__ int queue_length(int lp)
{
return item_count[lp];
}
__device__ void queue_clear(int lp)
{
#ifdef _CORRECTNESS_CHECKS
atomicAdd(&dequeue_count, queue_length(lp));
#endif
// root_modified not needed because an empty heap is already heapified.
item_count[lp] = 0;
}
// event items from PHOLD has no id-like element for a == method
#ifdef _A_STAR
__device__ void queue_insert_or_update(queue_item item, int lp)
{
queue_insert(item);
}
/* Block-cooperative update of an existing event: the threads of the block
 * stride over lp's heap region and the thread that finds a matching item
 * (operator==) overwrites it in place. Must be called by all threads of the
 * block (contains __syncthreads()). Asserts that the item was present.
 * Fix: `found` was a per-thread local, so every thread except the one that
 * located the item hit assert(found) with false; it is now a shared flag
 * that also lets the other threads stop searching early. */
__device__ void queue_update(queue_item item, int lp)
{
    __shared__ bool found;
    if (threadIdx.x == 0) {
        found = false;
    }
    __syncthreads();
    for (int i = threadIdx.x; i < queue_length(lp) && !found; i += blockDim.x) {
        queue_item *in_queue = &fel[lp * FEL_SIZE + i];
        if (*in_queue == item) {
            *in_queue = item;
            found = true;  // benign race: only ever set to true
            break;
        }
    }
    __syncthreads();
    assert(found);
}
#endif
#endif // #ifdef _LOCAL_HEAP_QUEUE
| ed94beb25f344fa7d062cc3ae0954ee3fbdb2d08.cu | #include "config.h"
#include "queue.h"
#ifdef _LOCAL_HEAP_QUEUE
#include <stdio.h>
#include <cub/cub.cuh>
#include "util.cuh"
#include "assert.h"
#include "local_heap_queue.cuh"
DECLARE_GPU_TIMER(heapify);
DECLARE_GPU_TIMER(heapify_down);
// base queue
__device__ queue_item *fel;
__device__ int dequeue_count;
__device__ int enqueue_count;
__device__ int item_count[NUM_LPS];
__device__ int insert_count[NUM_LPS];
__device__ bool root_modified[NUM_LPS]; // root modified --> heapify_down
__device__ long global_min;
__device__ long lp_min_ts[NUM_LPS];
// CUDA block size
static int num_threads_lps = min(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS);
static int num_blocks_lps = (NUM_LPS + num_threads_lps - 1) / num_threads_lps;
__device__ static int d_num_threads_lps = MIN(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS);
__device__ static int d_num_blocks_lps = (NUM_LPS + MIN(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS) - 1) / MIN(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS);
// -----------------------------
// Helper functions
// -----------------------------
static inline __device__ int get_left_child_index(int node)
{
return 2 * node + 1;
}
static inline __device__ int get_right_child_index(int node)
{
return 2 * node + 2;
}
static inline __device__ void swap(int lp, int n1, int n2)
{
queue_item temp = fel[lp * FEL_SIZE + n1];
fel[lp * FEL_SIZE + n1] = fel[lp * FEL_SIZE + n2];
fel[lp * FEL_SIZE + n2] = temp;
}
static __global__ void reset_enqueue_count()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
enqueue_count = 0;
}
}
static __global__ void reset_dequeue_count()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
dequeue_count = 0;
}
}
// -----------------------------
// Helper kernels
// -----------------------------
#ifdef _PHOLD
static __global__ void find_min_ts_device_pre()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
if (item_count[idx] > 0) {
lp_min_ts[idx] = fel[idx * FEL_SIZE].ts;
} else {
lp_min_ts[idx] = LONG_MAX;
}
}
}
#endif
#ifdef _PHOLD
__device__ void *temp_storage = NULL;
__device__ size_t temp_storage_bytes = 0;
static __global__ void find_min_ts_device()
{
if(!temp_storage)
{
cub::DeviceReduce::Min(temp_storage, temp_storage_bytes, lp_min_ts,
&global_min, NUM_LPS);
CudaSafeCall( cudaMalloc(&temp_storage, temp_storage_bytes) );
}
cub::DeviceReduce::Min(temp_storage, temp_storage_bytes, lp_min_ts,
&global_min, NUM_LPS);
}
#endif
// -----------------------------
// Main kernels
// -----------------------------
__device__ void copy_last_(int idx)
{
if (item_count[idx] > 0 && root_modified[idx]) {
// assert(item_count[idx] > 0);
int last_index = item_count[idx] - 1;
swap(idx, 0, last_index);
item_count[idx]--;
}
}
__global__ void copy_last()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
if (item_count[idx] > 0 && root_modified[idx]) {
int last_index = item_count[idx] + insert_count[idx] - 1;
if(insert_count[idx] > 0)
insert_count[idx]--;
else
item_count[idx]--;
swap(idx, 0, last_index);
}
}
}
static __device__ bool heapify_node(int idx, int node)
{
int left_child = get_left_child_index(node);
int right_child = get_right_child_index(node);
int min = node;
if (left_child < item_count[idx]
&& fel[idx * FEL_SIZE + min] > fel[idx * FEL_SIZE + left_child]) {
min = left_child;
}
if (right_child < item_count[idx]
&& fel[idx * FEL_SIZE + min] > fel[idx * FEL_SIZE + right_child]) {
min = right_child;
}
if (min == node) {
if (VERBOSE_DEBUG) {
printf(" [heapify_node][LP %d] heap property restored at node %d\n", idx, node);
}
return false;
}
if (VERBOSE_DEBUG) {
#ifdef _PHOLD
printf(" [heapify_node][LP %d] swapping node %d (=%ld) with smallest child %d (=%ld)\n",
idx, node, fel[idx * FEL_SIZE + node].ts, min, fel[idx * FEL_SIZE + min].ts);
#endif
}
swap(idx, node, min);
return true;
}
static __device__ void heapify_node_down(int idx, int node)
{
while (true) {
int left_child = get_left_child_index(node);
int right_child = get_right_child_index(node);
int min = node;
if (left_child < item_count[idx]
&& fel[idx * FEL_SIZE + min] > fel[idx * FEL_SIZE + left_child]) {
min = left_child;
}
if (right_child < item_count[idx]
&& fel[idx * FEL_SIZE + min] > fel[idx * FEL_SIZE + right_child]) {
min = right_child;
}
if (min == node) {
if (VERBOSE_DEBUG) {
printf(" [heapify_node][LP %d] heap property restored at node %d\n", idx, node);
}
break;
}
if (VERBOSE_DEBUG) {
#ifdef _PHOLD
printf(" [heapify_node][LP %d] swapping node %d (=%ld) with smallest child %d (=%ld)\n",
idx, node, fel[idx * FEL_SIZE + node].ts, min, fel[idx * FEL_SIZE + min].ts);
#endif
}
swap(idx, node, min);
node = min;
}
}
static __global__ void heapify_up()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
int new_count = insert_count[idx];
for(int new_nodes = 0; new_nodes < new_count; new_nodes++)
{
item_count[idx]++;
insert_count[idx]--;
for (int node = (item_count[idx] - 2) / 2;; node = (node - 1) / 2) {
if(!heapify_node(idx, node) || node == 0)
break;
}
}
}
}
static __device__ void heapify_down_(int idx)
{
if (root_modified[idx]) {
root_modified[idx] = false;
heapify_node_down(idx, 0);
}
}
/* Grid-stride over the LPs: for each LP whose root was replaced by
 * copy_last() (root_modified set), sift the new root down to restore the
 * min-heap property of that LP's sub-FEL.
 * Fix: removed the dead `int idx = 0;` declaration that was immediately
 * shadowed by the loop variable. */
static __global__ void heapify_down()
{
    for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
         idx < NUM_LPS;
         idx += blockDim.x * gridDim.x) {
        if (root_modified[idx]) {
            root_modified[idx] = false;
            heapify_node_down(idx, 0);
        }
    }
}
__global__ void local_heap_queue_print(int lp)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
printf("|");
for (int i = 0; i < FEL_SIZE; ++i) {
int fel_idx = lp * FEL_SIZE + i;
if (i < item_count[lp]) {
#ifdef _PHOLD
printf(" %ld |", fel[fel_idx].ts);
#endif
} else {
printf(" |");
}
}
printf("\n");
}
}
// -----------------------------
// Queue interface
// -----------------------------
void local_heap_queue_init()
{
queue_item *h_fel;
CudaSafeCall( cudaMalloc(&h_fel, ITEM_BYTES * FEL_SIZE * NUM_NODES) );
CudaSafeCall( cudaMemcpyToSymbol(fel, &h_fel, sizeof(fel)) );
printf("\n\n-----------------------------------\n");
printf("[ LHQ ] Memory consumption\n");
printf("-----------------------------------\n");
printf(" available: %.2f MB\n", (float) DEVICE_MEMORY_MB);
printf(" int arrays: %.2f KB\n", MEM_INT_ARRAYS / 1000.0);
printf(" long arrays: %.2f KB\n", MEM_LONG_ARRAYS / 1000.0);
printf(" fel: %.2f MB (%d items per FEL)\n",
NUM_NODES * FEL_SIZE * sizeof(queue_item) / 1000000.0, FEL_SIZE);
printf("-----------------------------------\n\n");
}
void local_heap_queue_finish()
{
}
int local_heap_queue_get_enqueue_count()
{
int c = -1;
CudaSafeCall( cudaMemcpyFromSymbol(&c, enqueue_count, sizeof(c)) );
return c;
}
int local_heap_queue_get_dequeue_count()
{
int c = -1;
CudaSafeCall( cudaMemcpyFromSymbol(&c, dequeue_count, sizeof(c)) );
return c;
}
void local_heap_queue_check_phold()
{
}
#ifdef _PHOLD
long local_heap_queue_get_min_ts()
{
find_min_ts_device_pre<<<num_blocks_lps, num_threads_lps>>>();
CudaCheckError();
find_min_ts_device<<<1, 1>>>();
CudaCheckError();
long min;
CudaSafeCall( cudaMemcpyFromSymbol(&min, global_min, sizeof(long)) );
return min;
}
#endif
void local_heap_queue_pre()
{
}
__global__ void update_item_count()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
item_count[idx] += insert_count[idx];
insert_count[idx] = 0;
}
}
void local_heap_queue_post()
{
#ifdef _CORRECTNESS_CHECKS
reset_enqueue_count<<<1, 1>>>();
reset_dequeue_count<<<1, 1>>>();
#endif
copy_last<<<num_blocks_lps, num_threads_lps>>>();
CudaCheckError();
heapify_down<<<num_blocks_lps, num_threads_lps>>>();
CudaCheckError();
heapify_up<<<num_blocks_lps, num_threads_lps>>>();
CudaCheckError();
}
void local_heap_queue_post_init()
{
#ifdef _CORRECTNESS_CHECKS
reset_enqueue_count<<<1, 1>>>();
reset_dequeue_count<<<1, 1>>>();
#endif
local_heap_queue_post();
}
__device__ bool queue_insert(queue_item item)
{
int lp = get_lp(item.node);
int insert_pos = item_count[lp] + atomicAdd(&insert_count[lp], 1);
int index = lp * FEL_SIZE + insert_pos;
if (VERBOSE_DEBUG) {
#ifdef _PHOLD
printf("inserting item with ts %ld at insert pos %d, index %d\n", item.ts,
insert_pos, index);
#endif
}
fel[index] = item;
#ifdef _CORRECTNESS_CHECKS
atomicAdd(&enqueue_count, 1);
#endif
return true;
}
__device__ int local_heap_queue_remove(int lp)
{
int index = lp * FEL_SIZE;
queue_set_done(index);
item_count[lp]--;
return index;
}
__device__ int queue_peek(queue_item **item, int lp)
{
int index = lp * FEL_SIZE;
if (item_count[lp] > 0
#ifdef _PHOLD
&& fel[index].ts < global_min + LOOKAHEAD
#endif
) {
*item = &(fel[index]);
return index;
}
else {
return -1;
}
}
/* index should only be a multiple of FEL_SIZE (as returned by d_peek) and as thus only the root node of
* each sub-FEL. Therefore, we can set root_modified without checking this. */
__device__ void local_heap_queue_set_done(int index)
{
int lp = index / FEL_SIZE;
root_modified[lp] = true;
#ifdef _A_STAR
copy_last_(lp);
heapify_down_(lp);
// heapify_(lp);
#endif
#ifdef _CORRECTNESS_CHECKS
atomicAdd(&dequeue_count, 1);
#endif
}
__device__ bool queue_is_empty(int lp)
{
return item_count[lp] + insert_count[lp] == 0;
}
__device__ int queue_length(int lp)
{
return item_count[lp];
}
__device__ void queue_clear(int lp)
{
#ifdef _CORRECTNESS_CHECKS
atomicAdd(&dequeue_count, queue_length(lp));
#endif
// root_modified not needed because an empty heap is already heapified.
item_count[lp] = 0;
}
// event items from PHOLD has no id-like element for a == method
#ifdef _A_STAR
__device__ void queue_insert_or_update(queue_item item, int lp)
{
queue_insert(item);
}
/* Block-cooperative update of an existing event: the threads of the block
 * stride over lp's heap region and the thread that finds a matching item
 * (operator==) overwrites it in place. Must be called by all threads of the
 * block (contains __syncthreads()). Asserts that the item was present.
 * Fix: `found` was a per-thread local, so every thread except the one that
 * located the item hit assert(found) with false; it is now a shared flag
 * that also lets the other threads stop searching early. */
__device__ void queue_update(queue_item item, int lp)
{
    __shared__ bool found;
    if (threadIdx.x == 0) {
        found = false;
    }
    __syncthreads();
    for (int i = threadIdx.x; i < queue_length(lp) && !found; i += blockDim.x) {
        queue_item *in_queue = &fel[lp * FEL_SIZE + i];
        if (*in_queue == item) {
            *in_queue = item;
            found = true;  // benign race: only ever set to true
            break;
        }
    }
    __syncthreads();
    assert(found);
}
#endif
#endif // #ifdef _LOCAL_HEAP_QUEUE
|
61b689bfd4f32d65a0df6b3123ef981eb40d813c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated c Wed Nov 14 22:53:54 2012
*/
#include "common_magma.h"
#define PRECISION_c
#if (GPUSHMEM >= 200)
#define chemv_bs 32
#define bank_shift 33
/*******************************************************************************
* Functions for each specific cases - Lower case
*/
/* Lower-triangular chemv compute kernel (multi-GPU, 32x32 blocking, exact
 * multiple of chemv_bs). Each thread block blkc handles one chemv_bs-row
 * band: it processes the diagonal block (only on the GPU that owns it,
 * blkc % num_gpus == my_gpu_id) and this GPU's share of the off-diagonal
 * blocks below the diagonal, writing per-block partial sums into the
 * workspace WC for the later _update kernel to reduce.
 * Launch geometry: (chemv_bs, 8, 1) threads; each thread covers 4 elements
 * per column pass (chemv_bs == 32 == 4*8). kstan masks the first kstan rows
 * of the first block (sub-matrix offset handling). */
__global__ void
magmablas_chemv_200_L_special_mgpu_offset_32( magma_int_t n, cuFloatComplex alpha,
                                              cuFloatComplex *A, magma_int_t lda,
                                              cuFloatComplex *x, magma_int_t incx,
                                              cuFloatComplex beta,
                                              cuFloatComplex *y, magma_int_t incy,
                                              cuFloatComplex *WC,
                                              magma_int_t my_gpu_id,
                                              magma_int_t num_gpus,
                                              magma_int_t nb,
                                              magma_int_t kstan)
{
    magma_int_t tx = threadIdx.x ;
    magma_int_t ty = threadIdx.y ;
    magma_int_t blkc = blockIdx.x ;
    if(blkc < my_gpu_id)
    {
        return;
    }
    cuFloatComplex res = MAGMA_C_ZERO; // used in scanning the row
    cuFloatComplex res_ = MAGMA_C_ZERO; // used in scanning the column
    cuFloatComplex res1 = MAGMA_C_ZERO; // temporary for res
    cuFloatComplex res2 = MAGMA_C_ZERO; // temporary for res_
    // bank_shift = chemv_bs + 1: padded row to avoid shared-memory bank conflicts
    __shared__ cuFloatComplex la [chemv_bs][bank_shift];
    __shared__ cuFloatComplex sdata [chemv_bs][9];
    __shared__ cuFloatComplex buff [chemv_bs];
    __shared__ cuFloatComplex buff2 [chemv_bs];
    magma_int_t break_d = chemv_bs * blkc;
    x += (break_d + tx ) * incx;
    A += break_d ;
    A += ty * lda + tx ;
    if( ty == 0 )
    {
        buff[tx] = x[0];
        if(blkc == 0 && my_gpu_id == 0 && tx < kstan)
        {
            // zero out rows above the sub-matrix offset
            MAGMA_C_SET2REAL(buff[tx], 0.0);
        }
    } // obtain the vector x, store in buff
    magma_int_t flag = 0;
    if ( (blkc % num_gpus) == my_gpu_id)
    {
        // this GPU owns the diagonal block of this band
        A += lda * (blkc/num_gpus) * chemv_bs; // change
#pragma unroll
        for(magma_int_t j =0; j<chemv_bs; j +=8)
            la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
        __syncthreads();
        // mirror the lower triangle into the upper (Hermitian symmetry)
#pragma unroll
        for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++){
            if ( i < tx ) {
                la[0][bank_shift * tx + i] = cuConjf( la[0][ i * bank_shift + tx] ) ;
            }
        }
        __syncthreads();
#pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
            res += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4];
        __syncthreads();
        A -= lda * (blkc/num_gpus) * chemv_bs;
        flag = 1;
    }
    x -= blkc * chemv_bs *incx ;
    x= x- tx*incx;
    magma_int_t wc_c = my_gpu_id ;
    magma_int_t count = 0 ;
    WC += break_d + tx;
    // number of off-diagonal blocks this GPU processes in this band
    magma_int_t num_blocks_iters = (blkc +1) /num_gpus - flag;
    if( my_gpu_id < ( (blkc+1) % num_gpus) )
    {
        num_blocks_iters += 1;
    }
    x += (my_gpu_id ) * chemv_bs ;
    if( blkc > my_gpu_id)
        for(magma_int_t s=0; s<num_blocks_iters; s++)
        {
            MAGMA_C_SET2REAL(res_,0);
            count++;
#pragma unroll
            for(magma_int_t j =0; j< chemv_bs; j +=8)
                la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
            if( ty == 0 )
            {
                buff2[tx] = x[tx];
                if(my_gpu_id == 0 && tx < kstan && count==1)
                {
                    MAGMA_C_SET2REAL(buff2[tx], 0.0);
                }
            } // obtain the vector x, store in buff2
            __syncthreads();
            // each off-diagonal block contributes to both the row result
            // (res, via A) and the column result (res_, via A^H)
#pragma unroll
            for(magma_int_t j=0; j < 4 ; j++)
            {
                res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
                res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; // iterate column
            }
            sdata[tx][ty]= res_ ;
            __syncthreads();
            if( ty== 1 )
            {
                res2 = sdata[tx][0]+sdata[tx][1]
                     + sdata[tx][2]+sdata[tx][3]
                     + sdata[tx][4]+sdata[tx][5]
                     + sdata[tx][6]+sdata[tx][7];
                WC[wc_c*lda ] = res2;
            }
            wc_c += num_gpus;
            x += num_gpus * chemv_bs;
            A += lda * chemv_bs ;
        }
    // reduce the 8 per-ty partials of the row result and publish to WC
    la[0][bank_shift*tx+ty]= res ;
    __syncthreads();
    if( ty== 0 )
    {
        res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
             + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
             + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
             + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
        WC[0+lda*(blkc)] = res1;
    }
}
/**************************************************************
* Lower case for generic sizes
*/
/* Lower-triangular chemv compute kernel for generic sizes (m not a multiple
 * of chemv_bs). Identical structure to the _special variant, plus edge
 * handling for the last, partial block: m_mod_nb is the index of the last
 * valid row inside that block, out-of-range lanes are clamped (trackA) and
 * the corresponding x entries / tile rows are masked. See _special for the
 * overall algorithm and WC workspace layout. */
__global__ void
magmablas_chemv_200_L_generic_mgpu_offset_32(magma_int_t n, cuFloatComplex alpha,
                                             cuFloatComplex *A, magma_int_t lda,
                                             cuFloatComplex *x, magma_int_t incx,
                                             cuFloatComplex beta,
                                             cuFloatComplex *y, magma_int_t incy,
                                             cuFloatComplex *WC,
                                             magma_int_t m_mod_nb,
                                             magma_int_t my_gpu_id,
                                             magma_int_t num_gpus,
                                             magma_int_t nb,
                                             magma_int_t kstan)
{
    magma_int_t tx = threadIdx.x ;
    magma_int_t ty = threadIdx.y ;
    magma_int_t blkc = blockIdx.x ;
    if(blkc < my_gpu_id)
    {
        return;
    }
    cuFloatComplex res = MAGMA_C_ZERO;
    cuFloatComplex res_ = MAGMA_C_ZERO;
    cuFloatComplex res1 = MAGMA_C_ZERO;
    cuFloatComplex res2 = MAGMA_C_ZERO;
    __shared__ cuFloatComplex la [chemv_bs][bank_shift];
    __shared__ cuFloatComplex sdata [chemv_bs][9];
    __shared__ cuFloatComplex buff [chemv_bs];
    __shared__ cuFloatComplex buff2 [chemv_bs];
    magma_int_t break_d = chemv_bs * blkc;
    x += (break_d + tx ) * incx;
    A += break_d ;
    A += lda * ty;
    magma_int_t trackA ;
    if( blkc == ( gridDim.x - 1 ) ) {
        // last (partial) block: clamp lane index and zero-pad x
        if( ty == 0 ){
            if( tx > m_mod_nb )
            {
                MAGMA_C_SET2REAL(buff[tx],0);
            }
            else
                buff[tx] = x[0];
        }
        if ( tx > m_mod_nb )
            trackA=m_mod_nb;
        else
            trackA=tx;
        A += trackA ;
    }
    else {
        if( ty == 0 ){
            buff[tx] = x[0];
        }
        trackA = tx;
        A += trackA ;
    }
    if(ty == 0 )
    {
        if(my_gpu_id == 0 && blkc ==0 && tx < kstan)//
        {
            // zero out rows above the sub-matrix offset
            MAGMA_C_SET2REAL(buff[tx], 0.0);
        }
    }
    magma_int_t flag = 0;
    if ( (blkc % num_gpus) == my_gpu_id)
    {
        A += lda * (blkc/num_gpus) * chemv_bs; // change
        // Somehow merging these two if - else creates problem
        // It could be a potential bug -- from synchronization or from cuda or compiler
        if( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
            for(magma_int_t j =0; j< chemv_bs; j+=8){
                if( ( ty + j ) > m_mod_nb )
                {
                    // sentinel for out-of-range tile entries (never used in
                    // the masked products below)
                    MAGMA_C_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999);
                }
                else
                    la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
            }
        }
        else {
#pragma unroll
            for(magma_int_t j =0; j< chemv_bs; j+=8){
                la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
            }
        }
        __syncthreads();
        // mirror the lower triangle into the upper (Hermitian symmetry)
#pragma unroll
        for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){
            if ( i < tx ) {
                la[0][bank_shift*tx+i] = cuConjf(la[0][i*bank_shift+tx]) ;
            }
        }
        __syncthreads();
#pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
            res += cuConjf(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4];
        __syncthreads();
        A -= lda * (blkc/num_gpus) * chemv_bs;
        flag = 1;
    }
    __syncthreads();
    x= x - break_d *incx ;
    x= x - tx * incx ;
    magma_int_t wc_c = my_gpu_id ;
    magma_int_t count = 0 ;
    WC += break_d + tx;
    magma_int_t num_blocks_iters = (blkc +1) /num_gpus - flag;
    if( my_gpu_id < ( (blkc+1) % num_gpus) )
    {
        num_blocks_iters += 1;
    }
    x += (my_gpu_id ) * chemv_bs ;
    if( blkc > my_gpu_id)
        for(magma_int_t s=0; s<num_blocks_iters; s++)
        {
            MAGMA_C_SET2REAL(res_,0);
            count++;
#pragma unroll
            for(magma_int_t j =0; j< chemv_bs; j +=8)
                la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
            if( ty == 0 )
            {
                buff2[tx] = x[tx];
                if(my_gpu_id == 0 && tx < kstan && count==1)//
                {
                    MAGMA_C_SET2REAL(buff2[tx], 0.0);
                }
            } // obtain the vector x, store in buff2
            __syncthreads();
#pragma unroll
            for(magma_int_t j=0; j < 4 ; j++)
            {
                res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
                res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; // iterate column
            }
            sdata[tx][ty]= res_ ;
            __syncthreads();
            if( ty== 1 )
            {
                res2 = sdata[tx][0]+sdata[tx][1]
                     + sdata[tx][2]+sdata[tx][3]
                     + sdata[tx][4]+sdata[tx][5]
                     + sdata[tx][6]+sdata[tx][7];
                WC[wc_c*lda ] = res2;
            }
            wc_c += num_gpus;
            x += num_gpus * chemv_bs;
            A += lda * chemv_bs ;
        }
    la[0][bank_shift*tx+ty]= res ;
    __syncthreads();
    if( ty== 0 )
    {
        res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
             + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
             + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
             + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
        WC[0+lda*(blkc)] = res1;
    }
}
/**************************************************************
*
*/
/* Reduction phase of the multi-GPU Lower chemv: row ind sums the partial
 * results that the compute kernels deposited in the workspace WC (one
 * chemv_bs-spaced entry per contributing block), then applies
 * y[ind] = beta*y[ind] + alpha*sum. Rows below the sub-matrix offset
 * (ind < kstan) are left untouched. Launched with 1-D blocks of nb threads,
 * one block per chemv_bs band. */
__global__ void
magmablas_chemv_200_L_update_mgpu_offset_32(magma_int_t n, cuFloatComplex alpha,
                                            cuFloatComplex* A, magma_int_t lda,
                                            cuFloatComplex *x, magma_int_t incx,
                                            cuFloatComplex beta,
                                            cuFloatComplex *y, magma_int_t incy,
                                            cuFloatComplex *WC,
                                            magma_int_t my_gpu_id,
                                            magma_int_t num_gpus,
                                            magma_int_t nb,
                                            magma_int_t kstan )
{
    magma_int_t i;
    magma_int_t tx = threadIdx.x ;
    magma_int_t ind = blockIdx.x * chemv_bs + tx ;
    cuFloatComplex Ca;
    MAGMA_C_SET2REAL(Ca, 0) ;
    WC+= ind + lda * blockIdx.x;
    // accumulate this row's partials, one per block at/after the diagonal
    for(i = blockIdx.x* chemv_bs; i<n; i+= chemv_bs){
        Ca += WC[0] ;
        WC += chemv_bs;
    }
    if( ind < n && ind >= kstan)
        y[ind * incy] = beta * y[ind * incy] + alpha * Ca ;
}
/* Host wrapper for the Lower-case multi-GPU chemv with a row/column offset:
 * shifts A, X, Y past the first `offset` rows (whole blocks of size nb are
 * skipped via the_chosen_block_id / num_blocks_skipped; the in-block
 * remainder kstan is masked inside the kernels), picks the special kernel
 * when m is a multiple of chemv_bs and the generic one otherwise, then runs
 * the reduction kernel to combine the per-block partials in dC_work into Y.
 * All launches go to magma_stream. */
extern "C"
void magmablas_chemv_200_L_mgpu_offset_32(magma_int_t m, cuFloatComplex alpha,
                                          cuFloatComplex *A, magma_int_t lda,
                                          cuFloatComplex *X, magma_int_t incx,
                                          cuFloatComplex beta,
                                          cuFloatComplex *Y, magma_int_t incy,
                                          cuFloatComplex *dC_work,
                                          magma_int_t my_gpu_id,
                                          magma_int_t num_gpus,
                                          magma_int_t nb,
                                          magma_int_t offset,
                                          magma_int_t num_blocks_skipped)
{
    magma_int_t the_chosen_block_id = offset / nb;  // whole blocks skipped
    magma_int_t kstan = offset % nb;                // in-block remainder
    A += lda * num_blocks_skipped * nb + the_chosen_block_id * nb;
    X += the_chosen_block_id * nb;
    Y += the_chosen_block_id * nb;
    magma_int_t blocks;
    if (m % chemv_bs==0)
        blocks = m / chemv_bs;
    else
        blocks = m / chemv_bs + 1;
    blocks -= the_chosen_block_id;
    dim3 grid(blocks, 1, 1);
    dim3 threads(nb, 8, 1);
    dim3 threads_u(nb, 1, 1);
    /*
     * If the matrix size is a multiple of chemv_bs, use the specialized
     * kernel; otherwise call the generic one (handles the partial last block).
     */
    if(m % chemv_bs == 0 )
    {
        hipLaunchKernelGGL(( magmablas_chemv_200_L_special_mgpu_offset_32) , dim3(grid), dim3(threads), 0, magma_stream ,
                           m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan);
    }
    else
    {
        magma_int_t m_mod_nb = m%chemv_bs - 1;  // last valid row index in the partial block
        hipLaunchKernelGGL(( magmablas_chemv_200_L_generic_mgpu_offset_32) , dim3(grid), dim3(threads), 0, magma_stream ,
                           m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_nb, my_gpu_id, num_gpus, nb, kstan);
    }
    hipLaunchKernelGGL(( magmablas_chemv_200_L_update_mgpu_offset_32), dim3(grid), dim3(threads_u), 0, magma_stream ,
                       m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan);
}
/*******************************************************************************
 *  Upper-triangular kernels.
 *
 *  Specialized case: matrix size is an exact multiple of chemv_bs.
 *  One thread block per 32-row band; launched with blockDim = (nb, 8)
 *  by the host wrapper (nb == 32 is enforced by the front end).
 *  Each block computes the diagonal-block contribution directly and
 *  writes off-diagonal partial sums into the workspace WC, which the
 *  _update kernel folds into y afterwards.  kstan masks the first
 *  kstan vector entries when an offset is in use.
 */
__global__ void
magmablas_chemv_200_U_special_mgpu_offset_32( magma_int_t n, cuFloatComplex alpha,
                       cuFloatComplex *A, magma_int_t lda,
                       cuFloatComplex *x, magma_int_t incx,
                       cuFloatComplex beta,
                       cuFloatComplex *y, magma_int_t incy,
                       cuFloatComplex *WC,
                       magma_int_t my_gpu_id,
                       magma_int_t num_gpus,
                       magma_int_t nb,
                       magma_int_t kstan)
{
    magma_int_t tx = threadIdx.x ;
    magma_int_t ty = threadIdx.y ;
    magma_int_t blkc = blockIdx.x ;
    cuFloatComplex res = MAGMA_C_ZERO; // accumulator for the row scan
    cuFloatComplex res_ = MAGMA_C_ZERO; // accumulator for the column scan
    cuFloatComplex res1 = MAGMA_C_ZERO; // temporary for the final row reduction
    cuFloatComplex res2 = MAGMA_C_ZERO; // temporary for the column reduction
    __shared__ cuFloatComplex la   [chemv_bs][bank_shift];
    __shared__ cuFloatComplex buff [chemv_bs];
    __shared__ cuFloatComplex buff2 [chemv_bs];
    magma_int_t break_d = chemv_bs * blkc;
    x += (break_d + tx ) * incx;
    A += break_d ;
    A += ty * lda + tx ;
    if( ty == 0 )
    {
        buff[tx] = x[0];
        if(blkc == 0 && tx < kstan)
        {
            // offset masking: zero the leading kstan entries
            MAGMA_C_SET2REAL(buff[tx], 0.0);
        }
    } // obtain the vector x store in buff;
    if ( (blkc % num_gpus) == my_gpu_id)
    {
        // this GPU owns the diagonal block of this band
        A += lda * (blkc/num_gpus) * chemv_bs; // move to the local column of the diagonal block
        #pragma unroll
        for(magma_int_t j =0; j<chemv_bs; j +=8)
            la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
        __syncthreads();
        // mirror the strictly upper part into the lower half (Hermitian)
        #pragma unroll
        for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++){
            if ( i > tx )
            {
                la[0][bank_shift * tx + i] = cuConjf(la[0][ i * bank_shift + tx]) ;
            }
        }
        __syncthreads();
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
            res += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4];
        __syncthreads();
        A -= lda * (blkc/num_gpus) * chemv_bs;
    }
    __syncthreads();
    x -= (break_d + tx ) * incx;// return to the beginning
    x += (my_gpu_id ) * chemv_bs ;// start at this GPU's first band
    magma_int_t wc_c = my_gpu_id ;
    // number of bands assigned to this GPU (round-robin distribution)
    magma_int_t total_blocks_gpu = gridDim.x /num_gpus;
    if( my_gpu_id < ( gridDim.x % num_gpus) )
    {
        total_blocks_gpu += 1;
    }
    // skip the bands at or before the diagonal band blkc
    magma_int_t shift = (blkc +1) /num_gpus ;
    if( my_gpu_id < ( (blkc+1) % num_gpus) )
    {
        shift += 1;
    }
    #pragma unroll
    for(magma_int_t s=0; s<shift; s++)
    {
        x += num_gpus * chemv_bs;
        A += lda * chemv_bs ;
        wc_c += num_gpus;
    }
    WC += break_d + tx;
    magma_int_t num_blocks_iters = total_blocks_gpu - shift;
    magma_int_t count = 0;
    // scan the off-diagonal blocks to the right of the diagonal band
    for(magma_int_t s=0; s<num_blocks_iters; s++)
    {
        MAGMA_C_SET2REAL(res_,0);
        count++;
        #pragma unroll
        for(magma_int_t j =0; j<chemv_bs; j +=8)
            la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
        if( ty == 0 )
        {
            buff2[tx] = x[tx];
        } // obtain the vector x store in buff2;
        __syncthreads();
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
        {
            res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
            res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; // iterate over the column
        }
        __syncthreads();
        la[0][bank_shift*tx+ty]= res_ ;
        __syncthreads();
        if( ty== 0 )
        {
            // reduce the 8 per-ty partial column sums and publish to WC
            res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
                 + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
                 + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
                 + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
            WC[wc_c*lda ] = res2;
        }
        __syncthreads();
        wc_c += num_gpus;
        x += num_gpus * chemv_bs;
        A += lda * chemv_bs ;
    }
    la[0][bank_shift*tx+ty]= res ;
    __syncthreads();
    if( ty== 0 )
    {
        // reduce the 8 per-ty partial row sums; final value for this row
        res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
             + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
             + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
             + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
        WC[0+lda*(blkc)] = res1;
    }
}
/*
 * Upper-triangular kernel for generic sizes (n not a multiple of
 * chemv_bs).  Same structure as the specialized kernel, with edge
 * handling for the ragged last block: m_mod_thread_x is the last
 * valid row index inside that block, and the_right_gpu identifies
 * the GPU that owns the tail block.  Launched with blockDim = (nb, 8).
 */
__global__ void
magmablas_chemv_200_U_generic_mgpu_offset_32(magma_int_t n, cuFloatComplex alpha,
                          cuFloatComplex *A, magma_int_t lda,
                          cuFloatComplex *x, magma_int_t incx,
                          cuFloatComplex beta,
                          cuFloatComplex *y, magma_int_t incy,
                          cuFloatComplex *WC,
                          magma_int_t m_mod_thread_x,
                          magma_int_t my_gpu_id,
                          magma_int_t num_gpus,
                          magma_int_t nb,
                          magma_int_t kstan,
                          magma_int_t the_right_gpu)
{
    magma_int_t tx = threadIdx.x ;
    magma_int_t ty = threadIdx.y ;
    magma_int_t blkc = blockIdx.x ;
    cuFloatComplex res = MAGMA_C_ZERO;  // row-scan accumulator
    cuFloatComplex res_ = MAGMA_C_ZERO; // column-scan accumulator
    cuFloatComplex res1 = MAGMA_C_ZERO; // temporary for the final row reduction
    cuFloatComplex res2 = MAGMA_C_ZERO; // temporary for the column reduction
    __shared__ cuFloatComplex la   [chemv_bs][bank_shift];
    __shared__ cuFloatComplex buff [chemv_bs];
    __shared__ cuFloatComplex buff2 [chemv_bs];
    magma_int_t break_d = chemv_bs * blkc;
    x += (break_d + tx ) * incx;
    A += break_d ;
    A += lda * ty;
    magma_int_t trackA ;
    if( blkc == ( gridDim.x - 1 ) ) {
        // ragged tail block: clamp reads past the last valid row
        if( ty == 0 ){
            if( tx > m_mod_thread_x )
            {
                MAGMA_C_SET2REAL(buff[tx],0);
            }
            else
                buff[tx] = x[0];
        }
        if ( tx > m_mod_thread_x )
            trackA=m_mod_thread_x;
        else
            trackA=tx;
        A += trackA ;
    }
    else
    {
        if( ty == 0 )
        {
            buff[tx] = x[0];
        }
        A += tx ;
    }
    if(ty == 0 )
    {
        if(blkc ==0 && tx < kstan)// offset masking of the leading entries
        {
            MAGMA_C_SET2REAL(buff[tx], 0.0);
        }
    }
    if ( (blkc % num_gpus) == my_gpu_id)
    {
        // this GPU owns the diagonal block of this band
        A += lda * (blkc/num_gpus) * chemv_bs; // move to the local column of the diagonal block
        if( blkc == ( gridDim.x - 1 ) ) {
            // pad invalid rows with a sentinel; they are never read back
            // because the mirror/accumulate steps only touch valid indices
            #pragma unroll
            for(magma_int_t j =0; j<chemv_bs; j+=8){
                if( ( ty + j ) > m_mod_thread_x )
                {
                    MAGMA_C_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999);
                }
                else
                    la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
            }
        }
        else {
            #pragma unroll
            for(magma_int_t j =0; j<chemv_bs; j+=8){
                la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
            }
        }
        __syncthreads();
        // mirror the strictly upper part into the lower half (Hermitian)
        #pragma unroll
        for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){
            if ( i > tx )
            {
                la[0][bank_shift * tx + i] = cuConjf(la[0][ i * bank_shift + tx]) ;
            }
        }
        __syncthreads();
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
            res += cuConjf(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4];
        __syncthreads();
        A -= lda * (blkc/num_gpus) * chemv_bs;
    }
    x -= (break_d + tx ) * incx;// return to the beginning
    x += (my_gpu_id ) * chemv_bs ;// start at this GPU's first band
    magma_int_t wc_c = my_gpu_id ;
    // number of bands assigned to this GPU (round-robin distribution)
    magma_int_t total_blocks_gpu = gridDim.x /num_gpus;
    if( my_gpu_id < ( gridDim.x % num_gpus) )
    {
        total_blocks_gpu += 1;
    }
    // skip the bands at or before the diagonal band blkc
    magma_int_t shift = (blkc +1) /num_gpus ;
    if( my_gpu_id < ( (blkc+1) % num_gpus) )
    {
        shift += 1;
    }
    #pragma unroll
    for(magma_int_t s=0; s<shift; s++)
    {
        x += num_gpus * chemv_bs;
        A += lda * chemv_bs ;
        wc_c += num_gpus;
    }
    WC += break_d + tx;
    magma_int_t num_blocks_iters = total_blocks_gpu - shift;
    magma_int_t count = 0;
    // scan the off-diagonal blocks to the right of the diagonal band
    for(magma_int_t s=0; s<num_blocks_iters; s++)
    {
        MAGMA_C_SET2REAL(res_,0);
        count++;
        if(my_gpu_id == the_right_gpu && s==num_blocks_iters-1)
        {
            // last iteration on the GPU owning the ragged tail block:
            // zero-pad both the vector slice and the matrix tile
            if( ty == 0 )
            {
                if( tx > m_mod_thread_x )
                {
                    MAGMA_C_SET2REAL(buff2[tx],0);
                }
                else
                    buff2[tx] = x[tx];
            }
            #pragma unroll
            for(magma_int_t j =0; j<chemv_bs; j+=8)
            {
                if( ( ty + j ) > m_mod_thread_x )
                {
                    MAGMA_C_SET2REAL(la[0][bank_shift*(ty+j)+tx], 0);
                }
                else
                    la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
            }
            __syncthreads();
        }// end of the_right_gpu
        else
        {
            #pragma unroll
            for(magma_int_t j =0; j<chemv_bs; j +=8)
                la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
            if( ty == 0 )
            {
                buff2[tx] = x[tx];
            } // obtain the vector x store in buff2;
            __syncthreads();
        }
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
        {
            res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
            res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; // iterate over the column
        }
        __syncthreads();
        la[0][bank_shift*tx+ty]= res_ ;
        __syncthreads();
        if( ty== 0 )
        {
            // reduce the 8 per-ty partial column sums and publish to WC
            res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
                 + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
                 + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
                 + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
            WC[wc_c*lda ] = res2;
        }
        __syncthreads();
        wc_c += num_gpus;
        x += num_gpus * chemv_bs;
        A += lda * chemv_bs ;
    }
    la[0][bank_shift*tx+ty]= res ;
    __syncthreads();
    if( ty== 0 )
    {
        // reduce the 8 per-ty partial row sums; final value for this row
        res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
             + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
             + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
             + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
        WC[0+lda*(blkc)] = res1;
    }
}
/*
 * Final reduction for the upper-triangular path: each thread sums
 * the partial results in WC for its row (columns 0 .. blockIdx.x)
 * and applies  y := beta*y + alpha*sum.  Entries with index < kstan
 * are left untouched (offset mask).
 */
__global__ void
magmablas_chemv_200_U_update_mgpu_offset_32(magma_int_t n, cuFloatComplex alpha,
                           cuFloatComplex* A, magma_int_t lda,
                           cuFloatComplex *x, magma_int_t incx,
                           cuFloatComplex beta,
                           cuFloatComplex *y, magma_int_t incy,
                           cuFloatComplex *WC,
                           magma_int_t my_gpu_id,
                           magma_int_t num_gpus,
                           magma_int_t nb,
                           magma_int_t kstan )
{
    const magma_int_t tx  = threadIdx.x;
    const magma_int_t ind = blockIdx.x * chemv_bs + tx;

    cuFloatComplex Ca;
    MAGMA_C_SET2REAL(Ca, 0);

    /* Upper case: accumulate the partial sums from the first block
       up to and including this block's own column. */
    cuFloatComplex *wc = WC + blockIdx.x * lda + tx;
    for(magma_int_t k = 0; k < (blockIdx.x + 1) * chemv_bs; k += chemv_bs)
    {
        Ca += wc[0];
        wc += chemv_bs;
    }

    if( ind < n && ind >= kstan )
        y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
}
extern "C"
void magmablas_chemv_200_U_mgpu_offset_32(magma_int_t m, cuFloatComplex alpha,
                       cuFloatComplex *A, magma_int_t lda,
                       cuFloatComplex *X, magma_int_t incx,
                       cuFloatComplex beta,
                       cuFloatComplex *Y, magma_int_t incy,
                       cuFloatComplex *dC_work,
                       magma_int_t my_gpu_id,
                       magma_int_t num_gpus,
                       magma_int_t nb,
                       magma_int_t offset,
                       magma_int_t num_blocks_skipped,
                       magma_int_t the_right_gpu)
{
    /* Translate the global offset into a starting block and an
       in-block remainder (kstan masks the first kstan rows). */
    const magma_int_t the_chosen_block_id = offset / nb;
    const magma_int_t kstan = offset % nb;

    /* Advance the local matrix slice and the vectors past the
       blocks that precede the offset. */
    A += lda * num_blocks_skipped * nb + the_chosen_block_id * nb;
    X += the_chosen_block_id * nb;
    Y += the_chosen_block_id * nb;

    /* ceil(m / chemv_bs) blocks, minus the skipped leading ones. */
    magma_int_t blocks = (m + chemv_bs - 1) / chemv_bs;
    blocks -= the_chosen_block_id;

    dim3 grid(blocks, 1, 1);
    dim3 threads(nb, 8, 1);
    dim3 threads_u(nb, 1, 1);

    if( m % chemv_bs == 0 )
    {
        /* Exact multiple of the block size: specialized kernel. */
        hipLaunchKernelGGL(( magmablas_chemv_200_U_special_mgpu_offset_32) , dim3(grid), dim3(threads), 0, magma_stream ,
            m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan);
    }
    else
    {
        /* Generic kernel needs the last valid row index of the
           ragged tail block and the GPU that owns that block. */
        const magma_int_t m_mod_thread_x = m % chemv_bs - 1;
        hipLaunchKernelGGL(( magmablas_chemv_200_U_generic_mgpu_offset_32) , dim3(grid), dim3(threads), 0, magma_stream ,
            m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, m_mod_thread_x, my_gpu_id, num_gpus, nb, kstan, the_right_gpu);
    }

    /* Second pass: fold the partial sums in dC_work into Y. */
    hipLaunchKernelGGL(( magmablas_chemv_200_U_update_mgpu_offset_32), dim3(grid), dim3(threads_u), 0, magma_stream ,
        m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan);
}
/*************************************************************************
Purpose
=======
magmablas_chemv performs the matrix-vector operation on fermi:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n hermitian matrix.
Arguments
==========
UPLO - CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
    ALPHA  - COMPLEX.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
    A      - COMPLEX array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
    X      - COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX - INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
    BETA   - COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
    Y      - COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY - INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
/*
 * Multi-GPU CHEMV driver (nb = 32 block-cyclic distribution) with an
 * offset into the matrix:  y := alpha*A*x + beta*y.
 *
 * A, X, Y, work are arrays of per-GPU device pointers; each GPU holds
 * a block-cyclic slice of the matrix.  stream[i][0] is the stream used
 * on GPU i.  Returns MAGMA_SUCCESS or a negative value identifying the
 * first invalid argument (BLAS convention).
 * Requires lwork >= lda * (ceil(n/32) + 1) and nb == 32.
 */
extern "C"
magma_int_t
magmablas_chemv_mgpu_32_offset( char uplo, magma_int_t n,
                      cuFloatComplex alpha,
                      cuFloatComplex **A, magma_int_t lda,
                      cuFloatComplex **X, magma_int_t incx,
                      cuFloatComplex beta,
                      cuFloatComplex **Y, magma_int_t incy,
                      cuFloatComplex **work, magma_int_t lwork,
                      magma_int_t num_gpus,
                      magma_int_t nb,
                      magma_int_t offset,
                      hipStream_t stream[][10])
{
    char uplo_[2] = {uplo, 0};
    int  upper = lapackf77_lsame(uplo_, "U");
    /*
     * Test the input parameters.
     */
    if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
        return -1;
    } else if ( n < 0 ) {
        return -2;
    } else if ( lda < max(1,n) ) {
        return -5;
    } else if ( incx == 0 ) {
        return -7;
    } else if ( incy == 0 ) {
        return -10;
    }
    /*
     * Quick return if possible.
     */
    if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
        return MAGMA_SUCCESS;
    /* workspace holds one partial-sum column per block plus one extra */
    magma_int_t blocks    = n / chemv_bs + (n % chemv_bs != 0);
    magma_int_t workspace = lda * (blocks + 1);
    if (lwork < workspace){
        /* cast: magma_int_t may be 64-bit (ILP64); %d expects int */
        printf("Not enough work space in magmablas_chemv: passed %d, required %d\n",
               (int) lwork, (int) workspace);
        exit(1);
    }
    if(nb != 32)
    {
        printf("Error in magmablas_chemv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n");
        exit(0);
    }
    magma_int_t i = 0;
    for(i=0; i<num_gpus; i++)
    {
        magma_setdevice(i);
        magmablasSetKernelStream(stream[i][0]);
        /* block containing the offset, and the GPU that owns it */
        magma_int_t the_chosen_block_id = offset / nb;
        magma_int_t the_chosen_gpu_id   = the_chosen_block_id % num_gpus;
        magma_int_t num_blocks_skipped  = the_chosen_block_id / num_gpus;
        if(i < the_chosen_gpu_id)
        {
            num_blocks_skipped += 1;
        }
        /* renumber GPUs so the owner of the offset block becomes id 0 */
        int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus;
        magma_int_t the_right_block_id = n / nb;
        magma_int_t the_right_gpu      = the_right_block_id % num_gpus;
        the_right_gpu = ( the_right_gpu + num_gpus - the_chosen_gpu_id ) % num_gpus;
        // the_right_gpu is used in Upper generic case.
        if ( upper)
        {
            magmablas_chemv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
                new_gpu_id, num_gpus, nb, offset, num_blocks_skipped, the_right_gpu);
        }
        else
        {
            magmablas_chemv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
                new_gpu_id, num_gpus, nb, offset, num_blocks_skipped);
        }
    }
    return MAGMA_SUCCESS;
}
/*
 * Variant of magmablas_chemv_mgpu_32_offset that does not set a
 * per-GPU kernel stream (kernels run on the current stream of each
 * device).  Same argument checks, workspace requirement
 * (lwork >= lda * (ceil(n/32) + 1)) and nb == 32 restriction.
 */
extern "C"
magma_int_t
magmablas_chemv2_mgpu_32_offset( char uplo, magma_int_t n,
                      cuFloatComplex alpha,
                      cuFloatComplex **A, magma_int_t lda,
                      cuFloatComplex **X, magma_int_t incx,
                      cuFloatComplex beta,
                      cuFloatComplex **Y, magma_int_t incy,
                      cuFloatComplex **work, magma_int_t lwork,
                      magma_int_t num_gpus,
                      magma_int_t nb,
                      magma_int_t offset)
{
    char uplo_[2] = {uplo, 0};
    int  upper = lapackf77_lsame(uplo_, "U");
    /*
     * Test the input parameters.
     */
    if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
        return -1;
    } else if ( n < 0 ) {
        return -2;
    } else if ( lda < max(1,n) ) {
        return -5;
    } else if ( incx == 0 ) {
        return -7;
    } else if ( incy == 0 ) {
        return -10;
    }
    /*
     * Quick return if possible.
     */
    if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
        return MAGMA_SUCCESS;
    /* workspace holds one partial-sum column per block plus one extra */
    magma_int_t blocks    = n / chemv_bs + (n % chemv_bs != 0);
    magma_int_t workspace = lda * (blocks + 1);
    if (lwork < workspace){
        /* cast: magma_int_t may be 64-bit (ILP64); %d expects int */
        printf("Not enough work space in magmablas_chemv: passed %d, required %d\n",
               (int) lwork, (int) workspace);
        exit(1);
    }
    if(nb != 32)
    {
        printf("Error in magmablas_chemv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n");
        exit(0);
    }
    magma_int_t i = 0;
    for(i=0; i<num_gpus; i++)
    {
        magma_setdevice(i);
        // magmablasSetKernelStream(stream[i][0]);
        /* block containing the offset, and the GPU that owns it */
        magma_int_t the_chosen_block_id = offset / nb;
        magma_int_t the_chosen_gpu_id   = the_chosen_block_id % num_gpus;
        magma_int_t num_blocks_skipped  = the_chosen_block_id / num_gpus;
        if(i < the_chosen_gpu_id)
        {
            num_blocks_skipped += 1;
        }
        /* renumber GPUs so the owner of the offset block becomes id 0 */
        int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus;
        magma_int_t the_right_block_id = n / nb;
        magma_int_t the_right_gpu      = the_right_block_id % num_gpus;
        the_right_gpu = ( the_right_gpu + num_gpus - the_chosen_gpu_id ) % num_gpus;
        // the_right_gpu is used in Upper generic case.
        if ( upper)
        {
            magmablas_chemv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
                new_gpu_id, num_gpus, nb, offset, num_blocks_skipped, the_right_gpu);
        }
        else
            magmablas_chemv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
                new_gpu_id, num_gpus, nb, offset, num_blocks_skipped);
    }
    return MAGMA_SUCCESS;
}
/*
 * Multi-GPU CHEMV without an offset (offset = 0), no per-GPU stream
 * selection.  Same argument checks, workspace requirement
 * (lwork >= lda * (ceil(n/32) + 1)) and nb == 32 restriction as the
 * offset variants.
 */
extern "C"
magma_int_t
magmablas_chemv2_mgpu_32( char uplo, magma_int_t n,
                      cuFloatComplex alpha,
                      cuFloatComplex **A, magma_int_t lda,
                      cuFloatComplex **X, magma_int_t incx,
                      cuFloatComplex beta,
                      cuFloatComplex **Y, magma_int_t incy,
                      cuFloatComplex **work, magma_int_t lwork,
                      magma_int_t num_gpus,
                      magma_int_t nb)
{
    char uplo_[2] = {uplo, 0};
    int  upper = lapackf77_lsame(uplo_, "U");
    /*
     * Test the input parameters.
     */
    if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
        return -1;
    } else if ( n < 0 ) {
        return -2;
    } else if ( lda < max(1,n) ) {
        return -5;
    } else if ( incx == 0 ) {
        return -7;
    } else if ( incy == 0 ) {
        return -10;
    }
    /*
     * Quick return if possible.
     */
    if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
        return MAGMA_SUCCESS;
    /* workspace holds one partial-sum column per block plus one extra */
    magma_int_t blocks    = n / chemv_bs + (n % chemv_bs != 0);
    magma_int_t workspace = lda * (blocks + 1);
    if (lwork < workspace){
        /* cast: magma_int_t may be 64-bit (ILP64); %d expects int */
        printf("Not enough work space in magmablas_chemv: passed %d, required %d\n",
               (int) lwork, (int) workspace);
        exit(1);
    }
    if(nb != 32)
    {
        printf("Error in magmablas_chemv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n");
        exit(0);
    }
    magma_int_t i = 0;
    for(i=0; i<num_gpus; i++)
    {
        magma_setdevice(i);
        magma_int_t the_right_block_id = n / nb;
        magma_int_t the_right_gpu      = the_right_block_id % num_gpus;
        // the_right_gpu is used in Upper generic case.
        if ( upper)
        {
            magmablas_chemv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
                i, num_gpus, nb, 0, 0, the_right_gpu);
        }
        else
            magmablas_chemv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
                i, num_gpus, nb, 0, 0);
    }
    return MAGMA_SUCCESS;
}
/* Set every element of the device array A[0..size) to zero. */
__global__ void
kernel_fillZero(cuFloatComplex *A, magma_int_t size)
{
    const magma_int_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    /* guard: the grid may overshoot the array length */
    if(idx < size)
    {
        MAGMA_C_SET2REAL(A[idx], 0.0);
    }
}
/* Host launcher: zero a device array of `size` cuFloatComplex values. */
void fillZero(cuFloatComplex *A, magma_int_t size)
{
    const magma_int_t threadCount = 512;
    /* round the grid size up so every element is covered */
    const magma_int_t blockCount = (size - 1) / threadCount + 1;
    dim3 grid(blockCount, 1, 1);
    dim3 threads(threadCount, 1, 1);
    hipLaunchKernelGGL(( kernel_fillZero), dim3(grid), dim3(threads), 0, 0, A, size);
}
#endif /* (GPUSHMEM >= 200) */
| 61b689bfd4f32d65a0df6b3123ef981eb40d813c.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated c Wed Nov 14 22:53:54 2012
*/
#include "common_magma.h"
#define PRECISION_c
#if (GPUSHMEM >= 200)
#define chemv_bs 32
#define bank_shift 33
/*******************************************************************************
 *  Lower-triangular kernels.
 *
 *  Specialized case: matrix size is an exact multiple of chemv_bs (=32).
 *  One thread block per 32-row band; launched with blockDim = (nb, 8)
 *  by the host wrapper (nb == 32 is enforced by the front end).
 *  The diagonal-block contribution is computed directly; off-diagonal
 *  partial sums go to the workspace WC and are folded into y by the
 *  _update kernel.  kstan masks the first kstan vector entries when an
 *  offset is in use.
 */
__global__ void
magmablas_chemv_200_L_special_mgpu_offset_32( magma_int_t n, cuFloatComplex alpha,
                       cuFloatComplex *A, magma_int_t lda,
                       cuFloatComplex *x, magma_int_t incx,
                       cuFloatComplex beta,
                       cuFloatComplex *y, magma_int_t incy,
                       cuFloatComplex *WC,
                       magma_int_t my_gpu_id,
                       magma_int_t num_gpus,
                       magma_int_t nb,
                       magma_int_t kstan)
{
    magma_int_t tx = threadIdx.x ;
    magma_int_t ty = threadIdx.y ;
    magma_int_t blkc = blockIdx.x ;
    // bands before my_gpu_id contribute nothing on this GPU
    if(blkc < my_gpu_id)
    {
        return;
    }
    cuFloatComplex res = MAGMA_C_ZERO; // accumulator for the row scan
    cuFloatComplex res_ = MAGMA_C_ZERO; // accumulator for the column scan
    cuFloatComplex res1 = MAGMA_C_ZERO; // temporary for the final row reduction
    cuFloatComplex res2 = MAGMA_C_ZERO; // temporary for the column reduction
    __shared__ cuFloatComplex la   [chemv_bs][bank_shift];
    __shared__ cuFloatComplex sdata   [chemv_bs][9];
    __shared__ cuFloatComplex buff [chemv_bs];
    __shared__ cuFloatComplex buff2 [chemv_bs];
    magma_int_t break_d = chemv_bs * blkc;
    x += (break_d + tx ) * incx;
    A += break_d ;
    A += ty * lda + tx ;
    if( ty == 0 )
    {
        buff[tx] = x[0];
        if(blkc == 0 && my_gpu_id == 0 && tx < kstan)
        {
            // offset masking: zero the leading kstan entries
            MAGMA_C_SET2REAL(buff[tx], 0.0);
        }
    } // obtain the vector x store in buff;
    magma_int_t flag = 0;
    if ( (blkc % num_gpus) == my_gpu_id)
    {
        // this GPU owns the diagonal block of this band
        A += lda * (blkc/num_gpus) * chemv_bs; // move to the local column of the diagonal block
        #pragma unroll
        for(magma_int_t j =0; j<chemv_bs; j +=8)
            la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
        __syncthreads();
        // mirror the strictly lower part into the upper half (Hermitian)
        #pragma unroll
        for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++){
            if ( i < tx ) {
                la[0][bank_shift * tx + i] = cuConjf( la[0][ i * bank_shift + tx] ) ;
            }
        }
        __syncthreads();
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
            res += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4];
        __syncthreads();
        A -= lda * (blkc/num_gpus) * chemv_bs;
        flag = 1; // the diagonal band is already consumed on this GPU
    }
    x -= blkc * chemv_bs *incx ;
    x= x- tx*incx;
    magma_int_t wc_c = my_gpu_id ;
    magma_int_t count = 0 ;
    WC += break_d + tx;
    // number of bands below the diagonal that this GPU still has to scan
    magma_int_t num_blocks_iters = (blkc +1) /num_gpus - flag;
    if( my_gpu_id < ( (blkc+1) % num_gpus) )
    {
        num_blocks_iters += 1;
    }
    x += (my_gpu_id ) * chemv_bs ;
    if( blkc > my_gpu_id)
    for(magma_int_t s=0; s<num_blocks_iters; s++)
    {
        MAGMA_C_SET2REAL(res_,0);
        count++;
        #pragma unroll
        for(magma_int_t j =0; j< chemv_bs; j +=8)
            la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
        if( ty == 0 )
        {
            buff2[tx] = x[tx];
            if(my_gpu_id == 0 && tx < kstan && count==1)
            {
                // offset masking only applies to the very first band
                MAGMA_C_SET2REAL(buff2[tx], 0.0);
            }
        } // obtain the vector x store in buff2;
        __syncthreads();
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
        {
            res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
            res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; // iterate over the column
        }
        sdata[tx][ty]= res_ ;
        __syncthreads();
        if( ty== 1 )
        {
            // reduce the 8 per-ty partial column sums and publish to WC
            res2 = sdata[tx][0]+sdata[tx][1]
                 + sdata[tx][2]+sdata[tx][3]
                 + sdata[tx][4]+sdata[tx][5]
                 + sdata[tx][6]+sdata[tx][7];
            WC[wc_c*lda ] = res2;
        }
        wc_c += num_gpus;
        x += num_gpus * chemv_bs;
        A += lda * chemv_bs ;
    }
    la[0][bank_shift*tx+ty]= res ;
    __syncthreads();
    if( ty== 0 )
    {
        // reduce the 8 per-ty partial row sums; final value for this row
        res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
             + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
             + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
             + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
        WC[0+lda*(blkc)] = res1;
    }
}
/**************************************************************
 *  Lower-triangular kernel for generic sizes (n not a multiple of
 *  chemv_bs).  Same structure as the specialized kernel, with edge
 *  handling for the ragged last block: m_mod_nb is the last valid
 *  row index inside that block.  Launched with blockDim = (nb, 8).
 */
__global__ void
magmablas_chemv_200_L_generic_mgpu_offset_32(magma_int_t n, cuFloatComplex alpha,
                          cuFloatComplex *A, magma_int_t lda,
                          cuFloatComplex *x, magma_int_t incx,
                          cuFloatComplex beta,
                          cuFloatComplex *y, magma_int_t incy,
                          cuFloatComplex *WC,
                          magma_int_t m_mod_nb,
                          magma_int_t my_gpu_id,
                          magma_int_t num_gpus,
                          magma_int_t nb,
                          magma_int_t kstan)
{
    magma_int_t tx = threadIdx.x ;
    magma_int_t ty = threadIdx.y ;
    magma_int_t blkc = blockIdx.x ;
    // bands before my_gpu_id contribute nothing on this GPU
    if(blkc < my_gpu_id)
    {
        return;
    }
    cuFloatComplex res = MAGMA_C_ZERO;  // row-scan accumulator
    cuFloatComplex res_ = MAGMA_C_ZERO; // column-scan accumulator
    cuFloatComplex res1 = MAGMA_C_ZERO; // temporary for the final row reduction
    cuFloatComplex res2 = MAGMA_C_ZERO; // temporary for the column reduction
    __shared__ cuFloatComplex la   [chemv_bs][bank_shift];
    __shared__ cuFloatComplex sdata   [chemv_bs][9];
    __shared__ cuFloatComplex buff [chemv_bs];
    __shared__ cuFloatComplex buff2 [chemv_bs];
    magma_int_t break_d = chemv_bs * blkc;
    x += (break_d + tx ) * incx;
    A += break_d ;
    A += lda * ty;
    magma_int_t trackA ;
    if( blkc == ( gridDim.x - 1 ) ) {
        // ragged tail block: clamp reads past the last valid row
        if( ty == 0 ){
            if( tx > m_mod_nb )
            {
                MAGMA_C_SET2REAL(buff[tx],0);
            }
            else
                buff[tx] = x[0];
        }
        if ( tx > m_mod_nb )
            trackA=m_mod_nb;
        else
            trackA=tx;
        A += trackA ;
    }
    else {
        if( ty == 0 ){
            buff[tx] = x[0];
        }
        trackA = tx;
        A += trackA ;
    }
    if(ty == 0 )
    {
        if(my_gpu_id == 0 && blkc ==0 && tx < kstan)// offset masking of the leading entries
        {
            MAGMA_C_SET2REAL(buff[tx], 0.0);
        }
    }
    magma_int_t flag = 0;
    if ( (blkc % num_gpus) == my_gpu_id)
    {
        // this GPU owns the diagonal block of this band
        A += lda * (blkc/num_gpus) * chemv_bs; // move to the local column of the diagonal block
        // Somehow merging these two if - else creates problem
        // It could be a potential bug -- from synchronization or from cuda or compiler
        if( blkc == ( gridDim.x - 1 ) ) {
            // pad invalid rows with a sentinel; never read back because the
            // mirror/accumulate steps only touch valid indices
            #pragma unroll
            for(magma_int_t j =0; j< chemv_bs; j+=8){
                if( ( ty + j ) > m_mod_nb )
                {
                    MAGMA_C_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999);
                }
                else
                    la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
            }
        }
        else {
            #pragma unroll
            for(magma_int_t j =0; j< chemv_bs; j+=8){
                la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
            }
        }
        __syncthreads();
        // mirror the strictly lower part into the upper half (Hermitian)
        #pragma unroll
        for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){
            if ( i < tx ) {
                la[0][bank_shift*tx+i] = cuConjf(la[0][i*bank_shift+tx]) ;
            }
        }
        __syncthreads();
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
            res += cuConjf(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4];
        __syncthreads();
        A -= lda * (blkc/num_gpus) * chemv_bs;
        flag = 1; // the diagonal band is already consumed on this GPU
    }
    __syncthreads();
    x= x - break_d *incx ;
    x= x - tx * incx ;
    magma_int_t wc_c = my_gpu_id ;
    magma_int_t count = 0 ;
    WC += break_d + tx;
    // number of bands below the diagonal that this GPU still has to scan
    magma_int_t num_blocks_iters = (blkc +1) /num_gpus - flag;
    if( my_gpu_id < ( (blkc+1) % num_gpus) )
    {
        num_blocks_iters += 1;
    }
    x += (my_gpu_id ) * chemv_bs ;
    if( blkc > my_gpu_id)
    for(magma_int_t s=0; s<num_blocks_iters; s++)
    {
        MAGMA_C_SET2REAL(res_,0);
        count++;
        #pragma unroll
        for(magma_int_t j =0; j< chemv_bs; j +=8)
            la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
        if( ty == 0 )
        {
            buff2[tx] = x[tx];
            if(my_gpu_id == 0 && tx < kstan && count==1)// masking only on the first band
            {
                MAGMA_C_SET2REAL(buff2[tx], 0.0);
            }
        } // obtain the vector x store in buff2;
        __syncthreads();
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
        {
            res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
            res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; // iterate over the column
        }
        sdata[tx][ty]= res_ ;
        __syncthreads();
        if( ty== 1 )
        {
            // reduce the 8 per-ty partial column sums and publish to WC
            res2 = sdata[tx][0]+sdata[tx][1]
                 + sdata[tx][2]+sdata[tx][3]
                 + sdata[tx][4]+sdata[tx][5]
                 + sdata[tx][6]+sdata[tx][7];
            WC[wc_c*lda ] = res2;
        }
        wc_c += num_gpus;
        x += num_gpus * chemv_bs;
        A += lda * chemv_bs ;
    }
    la[0][bank_shift*tx+ty]= res ;
    __syncthreads();
    if( ty== 0 )
    {
        // reduce the 8 per-ty partial row sums; final value for this row
        res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
             + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
             + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
             + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
        WC[0+lda*(blkc)] = res1;
    }
}
/**************************************************************
 *  Final reduction for the lower-triangular path: each thread
 *  folds its row of partial sums from the workspace WC into a
 *  single value and applies  y := beta*y + alpha*sum.
 *  Entries with index < kstan are left untouched (offset mask).
 */
__global__ void
magmablas_chemv_200_L_update_mgpu_offset_32(magma_int_t n, cuFloatComplex alpha,
                           cuFloatComplex* A, magma_int_t lda,
                           cuFloatComplex *x, magma_int_t incx,
                           cuFloatComplex beta,
                           cuFloatComplex *y, magma_int_t incy,
                           cuFloatComplex *WC,
                           magma_int_t my_gpu_id,
                           magma_int_t num_gpus,
                           magma_int_t nb,
                           magma_int_t kstan )
{
    const magma_int_t tx  = threadIdx.x;
    const magma_int_t ind = blockIdx.x * chemv_bs + tx;

    cuFloatComplex Ca;
    MAGMA_C_SET2REAL(Ca, 0);

    /* Walk the partial-sum column for this row, one entry per
       chemv_bs-sized block, starting at this block's diagonal. */
    cuFloatComplex *wc = WC + ind + lda * blockIdx.x;
    for(magma_int_t k = blockIdx.x * chemv_bs; k < n; k += chemv_bs)
    {
        Ca += wc[0];
        wc += chemv_bs;
    }

    if( ind < n && ind >= kstan )
        y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
}
extern "C"
void magmablas_chemv_200_L_mgpu_offset_32(magma_int_t m, cuFloatComplex alpha,
                       cuFloatComplex *A, magma_int_t lda,
                       cuFloatComplex *X, magma_int_t incx,
                       cuFloatComplex beta,
                       cuFloatComplex *Y, magma_int_t incy,
                       cuFloatComplex *dC_work,
                       magma_int_t my_gpu_id,
                       magma_int_t num_gpus,
                       magma_int_t nb,
                       magma_int_t offset,
                       magma_int_t num_blocks_skipped)
{
    /* Translate the global offset into a starting block and an
       in-block remainder (kstan masks the first kstan rows). */
    const magma_int_t the_chosen_block_id = offset / nb;
    const magma_int_t kstan = offset % nb;

    /* Advance the local matrix slice and the vectors past the
       blocks that precede the offset. */
    A += lda * num_blocks_skipped * nb + the_chosen_block_id * nb;
    X += the_chosen_block_id * nb;
    Y += the_chosen_block_id * nb;

    /* ceil(m / chemv_bs) blocks, minus the skipped leading ones. */
    magma_int_t blocks = (m + chemv_bs - 1) / chemv_bs;
    blocks -= the_chosen_block_id;

    dim3 grid(blocks, 1, 1);
    dim3 threads(nb, 8, 1);
    dim3 threads_u(nb, 1, 1);

    if( m % chemv_bs == 0 )
    {
        /* Exact multiple of the block size: specialized kernel. */
        magmablas_chemv_200_L_special_mgpu_offset_32 <<< grid, threads, 0, magma_stream >>>(
            m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan);
    }
    else
    {
        /* Generic kernel needs the index of the last valid row
           inside the ragged tail block. */
        const magma_int_t m_mod_nb = m % chemv_bs - 1;
        magmablas_chemv_200_L_generic_mgpu_offset_32 <<< grid, threads, 0, magma_stream >>> (
            m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, m_mod_nb, my_gpu_id, num_gpus, nb, kstan);
    }

    /* Second pass: fold the partial sums in dC_work into Y. */
    magmablas_chemv_200_L_update_mgpu_offset_32<<< grid, threads_u, 0, magma_stream >>>(
        m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan);
}
/*******************************************************************************
 *  Upper-triangular kernels.
 *
 *  Specialized case: matrix size is an exact multiple of chemv_bs.
 *  One thread block per 32-row band; launched with blockDim = (nb, 8)
 *  by the host wrapper (nb == 32 is enforced by the front end).
 *  Each block computes the diagonal-block contribution directly and
 *  writes off-diagonal partial sums into the workspace WC, which the
 *  _update kernel folds into y afterwards.  kstan masks the first
 *  kstan vector entries when an offset is in use.
 */
__global__ void
magmablas_chemv_200_U_special_mgpu_offset_32( magma_int_t n, cuFloatComplex alpha,
                       cuFloatComplex *A, magma_int_t lda,
                       cuFloatComplex *x, magma_int_t incx,
                       cuFloatComplex beta,
                       cuFloatComplex *y, magma_int_t incy,
                       cuFloatComplex *WC,
                       magma_int_t my_gpu_id,
                       magma_int_t num_gpus,
                       magma_int_t nb,
                       magma_int_t kstan)
{
    magma_int_t tx = threadIdx.x ;
    magma_int_t ty = threadIdx.y ;
    magma_int_t blkc = blockIdx.x ;
    cuFloatComplex res = MAGMA_C_ZERO; // accumulator for the row scan
    cuFloatComplex res_ = MAGMA_C_ZERO; // accumulator for the column scan
    cuFloatComplex res1 = MAGMA_C_ZERO; // temporary for the final row reduction
    cuFloatComplex res2 = MAGMA_C_ZERO; // temporary for the column reduction
    __shared__ cuFloatComplex la   [chemv_bs][bank_shift];
    __shared__ cuFloatComplex buff [chemv_bs];
    __shared__ cuFloatComplex buff2 [chemv_bs];
    magma_int_t break_d = chemv_bs * blkc;
    x += (break_d + tx ) * incx;
    A += break_d ;
    A += ty * lda + tx ;
    if( ty == 0 )
    {
        buff[tx] = x[0];
        if(blkc == 0 && tx < kstan)
        {
            // offset masking: zero the leading kstan entries
            MAGMA_C_SET2REAL(buff[tx], 0.0);
        }
    } // obtain the vector x store in buff;
    if ( (blkc % num_gpus) == my_gpu_id)
    {
        // this GPU owns the diagonal block of this band
        A += lda * (blkc/num_gpus) * chemv_bs; // move to the local column of the diagonal block
        #pragma unroll
        for(magma_int_t j =0; j<chemv_bs; j +=8)
            la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
        __syncthreads();
        // mirror the strictly upper part into the lower half (Hermitian)
        #pragma unroll
        for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++){
            if ( i > tx )
            {
                la[0][bank_shift * tx + i] = cuConjf(la[0][ i * bank_shift + tx]) ;
            }
        }
        __syncthreads();
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
            res += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4];
        __syncthreads();
        A -= lda * (blkc/num_gpus) * chemv_bs;
    }
    __syncthreads();
    x -= (break_d + tx ) * incx;// return to the beginning
    x += (my_gpu_id ) * chemv_bs ;// start at this GPU's first band
    magma_int_t wc_c = my_gpu_id ;
    // number of bands assigned to this GPU (round-robin distribution)
    magma_int_t total_blocks_gpu = gridDim.x /num_gpus;
    if( my_gpu_id < ( gridDim.x % num_gpus) )
    {
        total_blocks_gpu += 1;
    }
    // skip the bands at or before the diagonal band blkc
    magma_int_t shift = (blkc +1) /num_gpus ;
    if( my_gpu_id < ( (blkc+1) % num_gpus) )
    {
        shift += 1;
    }
    #pragma unroll
    for(magma_int_t s=0; s<shift; s++)
    {
        x += num_gpus * chemv_bs;
        A += lda * chemv_bs ;
        wc_c += num_gpus;
    }
    WC += break_d + tx;
    magma_int_t num_blocks_iters = total_blocks_gpu - shift;
    magma_int_t count = 0;
    // scan the off-diagonal blocks to the right of the diagonal band
    for(magma_int_t s=0; s<num_blocks_iters; s++)
    {
        MAGMA_C_SET2REAL(res_,0);
        count++;
        #pragma unroll
        for(magma_int_t j =0; j<chemv_bs; j +=8)
            la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
        if( ty == 0 )
        {
            buff2[tx] = x[tx];
        } // obtain the vector x store in buff2;
        __syncthreads();
        #pragma unroll
        for(magma_int_t j=0; j < 4 ; j++)
        {
            res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
            res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; // iterate over the column
        }
        __syncthreads();
        la[0][bank_shift*tx+ty]= res_ ;
        __syncthreads();
        if( ty== 0 )
        {
            // reduce the 8 per-ty partial column sums and publish to WC
            res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
                 + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
                 + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
                 + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
            WC[wc_c*lda ] = res2;
        }
        __syncthreads();
        wc_c += num_gpus;
        x += num_gpus * chemv_bs;
        A += lda * chemv_bs ;
    }
    la[0][bank_shift*tx+ty]= res ;
    __syncthreads();
    if( ty== 0 )
    {
        // reduce the 8 per-ty partial row sums; final value for this row
        res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
             + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
             + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
             + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
        WC[0+lda*(blkc)] = res1;
    }
}
__global__ void
magmablas_chemv_200_U_generic_mgpu_offset_32(magma_int_t n, cuFloatComplex alpha,
cuFloatComplex *A, magma_int_t lda,
cuFloatComplex *x, magma_int_t incx,
cuFloatComplex beta,
cuFloatComplex *y, magma_int_t incy,
cuFloatComplex *WC,
magma_int_t m_mod_thread_x,
magma_int_t my_gpu_id,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t kstan,
magma_int_t the_right_gpu)
{
magma_int_t tx = threadIdx.x ;
magma_int_t ty = threadIdx.y ;
magma_int_t blkc = blockIdx.x ;
cuFloatComplex res = MAGMA_C_ZERO;
cuFloatComplex res_ = MAGMA_C_ZERO;
cuFloatComplex res1 = MAGMA_C_ZERO;
cuFloatComplex res2 = MAGMA_C_ZERO;
__shared__ cuFloatComplex la [chemv_bs][bank_shift];
__shared__ cuFloatComplex buff [chemv_bs];
__shared__ cuFloatComplex buff2 [chemv_bs];
magma_int_t break_d = chemv_bs * blkc;
x += (break_d + tx ) * incx;
A += break_d ;
A += lda * ty;
magma_int_t trackA ;
if( blkc == ( gridDim.x - 1 ))
{
if( ty == 0 ){
if( tx > m_mod_thread_x )
{
MAGMA_C_SET2REAL(buff[tx],0);
}
else
buff[tx] = x[0];
}
if ( tx > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx;
A += trackA ;
}
else
{
if( ty == 0 )
{
buff[tx] = x[0];
}
A += tx ;
}
if(ty == 0 )
{
if(blkc ==0 && tx < kstan)//
{
MAGMA_C_SET2REAL(buff[tx], 0.0);
}
}
if ( (blkc % num_gpus) == my_gpu_id)
{
A += lda * (blkc/num_gpus) * chemv_bs; // change
if( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(magma_int_t j =0; j<chemv_bs; j+=8){
if( ( ty + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999);
}
else
la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
else {
#pragma unroll
for(magma_int_t j =0; j<chemv_bs; j+=8){
la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){
if ( i > tx )
{
la[0][bank_shift * tx + i] = cuConjf(la[0][ i * bank_shift + tx]) ;
}
}
__syncthreads();
#pragma unroll
for(magma_int_t j=0; j < 4 ; j++)
res += cuConjf(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4];
__syncthreads();
A -= lda * (blkc/num_gpus) * chemv_bs;
}
x -= (break_d + tx ) * incx;// return to the beginning
x += (my_gpu_id ) * chemv_bs ;//
magma_int_t wc_c = my_gpu_id ;
magma_int_t total_blocks_gpu = gridDim.x /num_gpus;
if( my_gpu_id < ( gridDim.x % num_gpus) )
{
total_blocks_gpu += 1;
}
magma_int_t shift = (blkc +1) /num_gpus ;
if( my_gpu_id < ( (blkc+1) % num_gpus) )
{
shift += 1;
}
#pragma unroll
for(magma_int_t s=0; s<shift; s++)
{
x += num_gpus * chemv_bs;
A += lda * chemv_bs ;
wc_c += num_gpus;
}
WC += break_d + tx;
magma_int_t num_blocks_iters = total_blocks_gpu - shift;
magma_int_t count = 0;
for(magma_int_t s=0; s<num_blocks_iters; s++)
{
MAGMA_C_SET2REAL(res_,0);
count++;
if(my_gpu_id == the_right_gpu && s==num_blocks_iters-1)
{
if( ty == 0 )
{
if( tx > m_mod_thread_x )
{
MAGMA_C_SET2REAL(buff2[tx],0);
}
else
buff2[tx] = x[tx];
}
#pragma unroll
for(magma_int_t j =0; j<chemv_bs; j+=8)
{
if( ( ty + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[0][bank_shift*(ty+j)+tx], 0);
}
else
la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
}
__syncthreads();
}// end of the_right_gpu
else
{
#pragma unroll
for(magma_int_t j =0; j<chemv_bs; j +=8)
la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
if( ty == 0 )
{
buff2[tx] = x[tx];
} // obtain the vector x store in buff;
__syncthreads();
}
#pragma unroll
for(magma_int_t j=0; j < 4 ; j++)
{
res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum
}
__syncthreads();
la[0][bank_shift*tx+ty]= res_ ;
__syncthreads();
if( ty== 0 )
{
res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[wc_c*lda ] = res2;
}
__syncthreads();
wc_c += num_gpus;
x += num_gpus * chemv_bs;
A += lda * chemv_bs ;
}
la[0][bank_shift*tx+ty]= res ;
__syncthreads();
if( ty== 0 )
{
res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[0+lda*(blkc)] = res1;
}
}
__global__ void
magmablas_chemv_200_U_update_mgpu_offset_32(magma_int_t n, cuFloatComplex alpha,
cuFloatComplex* A, magma_int_t lda,
cuFloatComplex *x, magma_int_t incx,
cuFloatComplex beta,
cuFloatComplex *y, magma_int_t incy,
cuFloatComplex *WC,
magma_int_t my_gpu_id,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t kstan )
{
magma_int_t i;
magma_int_t tx = threadIdx.x ;
magma_int_t ind = blockIdx.x * chemv_bs + tx ;
cuFloatComplex Ca;
MAGMA_C_SET2REAL(Ca, 0) ;
WC+= blockIdx.x * lda + tx;
for(i = 0; i<(blockIdx.x+1)*chemv_bs; i+= chemv_bs)
{
Ca += WC[0] ;
WC += chemv_bs ;
}
if( ind < n && ind >= kstan)
y[ind * incy] = beta * y[ind * incy] + alpha * Ca ;
}
extern "C"
void magmablas_chemv_200_U_mgpu_offset_32(magma_int_t m, cuFloatComplex alpha,
cuFloatComplex *A, magma_int_t lda,
cuFloatComplex *X, magma_int_t incx,
cuFloatComplex beta,
cuFloatComplex *Y, magma_int_t incy,
cuFloatComplex *dC_work,
magma_int_t my_gpu_id,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t offset,
magma_int_t num_blocks_skipped,
magma_int_t the_right_gpu)
{
magma_int_t the_chosen_block_id = offset / nb;
magma_int_t kstan = offset % nb;
A += lda * num_blocks_skipped * nb + the_chosen_block_id * nb;
X += the_chosen_block_id * nb;
Y += the_chosen_block_id * nb;
magma_int_t blocks;
if (m % chemv_bs==0)
blocks = m / chemv_bs;
else
blocks = m / chemv_bs + 1;
blocks -= the_chosen_block_id;
dim3 grid(blocks, 1, 1);
dim3 threads(nb, 8, 1);
dim3 threads_u(nb, 1, 1);
/*
* If matrix size is multiple of chemv_bs, we use a specific code.
* otherwise, we call the generic case.
*/
if(m % chemv_bs == 0 ) {
magmablas_chemv_200_U_special_mgpu_offset_32 <<< grid, threads, 0, magma_stream >>>(
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan);
}
else{
magma_int_t m_mod_thread_x = m%chemv_bs - 1;
magmablas_chemv_200_U_generic_mgpu_offset_32 <<< grid, threads, 0, magma_stream >>> (
m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, my_gpu_id, num_gpus, nb, kstan, the_right_gpu);
}
magmablas_chemv_200_U_update_mgpu_offset_32<<< grid, threads_u, 0, magma_stream >>>(
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan);
}
/*************************************************************************
Purpose
=======
magmablas_chemv performs the matrix-vector operation on fermi:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n hermitian matrix.
Arguments
==========
UPLO - CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA - COMPLEX*16 .
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - COMPLEX*16 array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
X - COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX - INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA - COMPLEX*16 .
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y - COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY - INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_chemv_mgpu_32_offset( char uplo, magma_int_t n,
cuFloatComplex alpha,
cuFloatComplex **A, magma_int_t lda,
cuFloatComplex **X, magma_int_t incx,
cuFloatComplex beta,
cuFloatComplex **Y, magma_int_t incy,
cuFloatComplex **work, magma_int_t lwork,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t offset,
cudaStream_t stream[][10])
{
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return MAGMA_SUCCESS;
magma_int_t blocks = n / chemv_bs + (n % chemv_bs != 0);
magma_int_t workspace = lda * (blocks + 1);
if (lwork < workspace){
printf("Not enough work space in magmablas_chemv: passed %d, required %d\n",
lwork, workspace);
exit(1);
}
if(nb != 32)
{
printf("Error in magmablas_chemv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n");
exit(0);
}
magma_int_t i = 0;
for(i=0; i<num_gpus; i++)
{
magma_setdevice(i);
magmablasSetKernelStream(stream[i][0]);
magma_int_t the_chosen_block_id = offset / nb;
magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus;
magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus;
if(i < the_chosen_gpu_id)
{
num_blocks_skipped += 1;
}
int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus;
magma_int_t the_right_block_id = n / nb ;
magma_int_t the_right_gpu = the_right_block_id % num_gpus;
the_right_gpu = ( the_right_gpu + num_gpus - the_chosen_gpu_id ) % num_gpus;
// the_right_gpu is used in Upper generic case.
if ( upper)
{
magmablas_chemv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
new_gpu_id, num_gpus, nb, offset, num_blocks_skipped, the_right_gpu);
}
else
{
magmablas_chemv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
new_gpu_id, num_gpus, nb, offset, num_blocks_skipped);
}
}
return MAGMA_SUCCESS;
}
extern "C"
magma_int_t
magmablas_chemv2_mgpu_32_offset( char uplo, magma_int_t n,
cuFloatComplex alpha,
cuFloatComplex **A, magma_int_t lda,
cuFloatComplex **X, magma_int_t incx,
cuFloatComplex beta,
cuFloatComplex **Y, magma_int_t incy,
cuFloatComplex **work, magma_int_t lwork,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t offset)
{
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return MAGMA_SUCCESS;
magma_int_t blocks = n / chemv_bs + (n % chemv_bs != 0);
magma_int_t workspace = lda * (blocks + 1);
if (lwork < workspace){
printf("Not enough work space in magmablas_chemv: passed %d, required %d\n",
lwork, workspace);
exit(1);
}
if(nb != 32)
{
printf("Error in magmablas_chemv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n");
exit(0);
}
magma_int_t i = 0;
for(i=0; i<num_gpus; i++)
{
magma_setdevice(i);
// magmablasSetKernelStream(stream[i][0]);
magma_int_t the_chosen_block_id = offset / nb;
magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus;
magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus;
if(i < the_chosen_gpu_id)
{
num_blocks_skipped += 1;
}
int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus;
magma_int_t the_right_block_id = n / nb ;
magma_int_t the_right_gpu = the_right_block_id % num_gpus;
the_right_gpu = ( the_right_gpu + num_gpus - the_chosen_gpu_id ) % num_gpus;
// the_right_gpu is used in Upper generic case.
if ( upper)
{
magmablas_chemv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
new_gpu_id, num_gpus, nb, offset, num_blocks_skipped, the_right_gpu);
}
else
magmablas_chemv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
new_gpu_id, num_gpus, nb, offset, num_blocks_skipped);
}
return MAGMA_SUCCESS;
}
extern "C"
magma_int_t
magmablas_chemv2_mgpu_32( char uplo, magma_int_t n,
cuFloatComplex alpha,
cuFloatComplex **A, magma_int_t lda,
cuFloatComplex **X, magma_int_t incx,
cuFloatComplex beta,
cuFloatComplex **Y, magma_int_t incy,
cuFloatComplex **work, magma_int_t lwork,
magma_int_t num_gpus,
magma_int_t nb)
{
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return MAGMA_SUCCESS;
magma_int_t blocks = n / chemv_bs + (n % chemv_bs != 0);
magma_int_t workspace = lda * (blocks + 1);
if (lwork < workspace){
printf("Not enough work space in magmablas_chemv: passed %d, required %d\n",
lwork, workspace);
exit(1);
}
if(nb != 32)
{
printf("Error in magmablas_chemv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n");
exit(0);
}
magma_int_t i = 0;
for(i=0; i<num_gpus; i++)
{
magma_setdevice(i);
magma_int_t the_right_block_id = n / nb ;
magma_int_t the_right_gpu = the_right_block_id % num_gpus;
// the_right_gpu is used in Upper generic case.
if ( upper)
{
magmablas_chemv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
i, num_gpus, nb, 0, 0, the_right_gpu);
}
else
magmablas_chemv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i],
i, num_gpus, nb, 0, 0);
}
return MAGMA_SUCCESS;
}
__global__ void
kernel_fillZero(cuFloatComplex *A, magma_int_t size)
{
magma_int_t id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size)
{
MAGMA_C_SET2REAL(A[id], 0.0);
}
}
void fillZero(cuFloatComplex *A, magma_int_t size)
{
magma_int_t blocks = (size-1)/512 + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(512, 1, 1);
kernel_fillZero<<<grid, threads>>>(A, size);
}
#endif /* (GPUSHMEM >= 200) */
|
460f56066da53317338669f60e1e4853322047a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
/**
* @brief pagerank 1
* @param row csr pointer array
* @param col csr column array
* @param data weight array
* @param page_rank1 pagerank array 1
* @param page_rank2 pagerank array 2
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
__global__ void
pagerank1(int *row, int *col, int *data, float *page_rank1, float *page_rank2,
const int num_nodes, const int num_edges)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
// Get the starting and ending pointers of the neighborlist
int start = row[tid];
int end;
if (tid + 1 < num_nodes) {
end = row[tid + 1];
} else {
end = num_edges;
}
int nid;
// Navigate the neighbor list
for (int edge = start; edge < end; edge++) {
nid = col[edge];
// Transfer the PageRank value to neighbors
atomicAdd(&page_rank2[nid], page_rank1[tid] / (float)(end - start));
}
}
}
/**
* @brief pagerank 2
* @param row csr pointer array
* @param col csr column array
* @param data weight array
* @param page_rank1 pagerank array 1
* @param page_rank2 pagerank array 2
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
__global__ void
pagerank2(int *row, int *col, int *data, float *page_rank1, float *page_rank2,
const int num_nodes, const int num_edges)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Update pagerank value with the damping factor
if (tid < num_nodes) {
page_rank1[tid] = 0.15 / (float)num_nodes + 0.85 * page_rank2[tid];
page_rank2[tid] = 0.0f;
}
}
/**
* @brief inibuffer
* @param row csr pointer array
* @param page_rank1 pagerank array 1
* @param page_rank2 pagerank array 2
* @param num_nodes number of vertices
*/
__global__ void
inibuffer(int *row, float *page_rank1, float *page_rank2, const int num_nodes,
const int num_edges)
{
// Get my thread id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
page_rank1[tid] = 1 / (float)num_nodes;
page_rank2[tid] = 0.0f;
}
}
| 460f56066da53317338669f60e1e4853322047a1.cu | /************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
/**
* @brief pagerank 1
* @param row csr pointer array
* @param col csr column array
* @param data weight array
* @param page_rank1 pagerank array 1
* @param page_rank2 pagerank array 2
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
__global__ void
pagerank1(int *row, int *col, int *data, float *page_rank1, float *page_rank2,
const int num_nodes, const int num_edges)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
// Get the starting and ending pointers of the neighborlist
int start = row[tid];
int end;
if (tid + 1 < num_nodes) {
end = row[tid + 1];
} else {
end = num_edges;
}
int nid;
// Navigate the neighbor list
for (int edge = start; edge < end; edge++) {
nid = col[edge];
// Transfer the PageRank value to neighbors
atomicAdd(&page_rank2[nid], page_rank1[tid] / (float)(end - start));
}
}
}
/**
* @brief pagerank 2
* @param row csr pointer array
* @param col csr column array
* @param data weight array
* @param page_rank1 pagerank array 1
* @param page_rank2 pagerank array 2
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
__global__ void
pagerank2(int *row, int *col, int *data, float *page_rank1, float *page_rank2,
const int num_nodes, const int num_edges)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Update pagerank value with the damping factor
if (tid < num_nodes) {
page_rank1[tid] = 0.15 / (float)num_nodes + 0.85 * page_rank2[tid];
page_rank2[tid] = 0.0f;
}
}
/**
* @brief inibuffer
* @param row csr pointer array
* @param page_rank1 pagerank array 1
* @param page_rank2 pagerank array 2
* @param num_nodes number of vertices
*/
__global__ void
inibuffer(int *row, float *page_rank1, float *page_rank2, const int num_nodes,
const int num_edges)
{
// Get my thread id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
page_rank1[tid] = 1 / (float)num_nodes;
page_rank2[tid] = 0.0f;
}
}
|
8aded522adfdc59f08d9a7c2501cc2aa37f699ee.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "reference.h"
__global__
void gabor (
double *gabor_spatial,
const unsigned int height,
const unsigned int width,
const double center_y,
const double center_x,
const double ctheta,
const double stheta,
const double scale,
const double sx_2,
const double sy_2,
const double fx)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
double centered_x, centered_y, u, v;
if (x < width && y < height) {
centered_y = (double)y - center_y;
centered_x = (double)x - center_x;
u = ctheta * centered_x - stheta * centered_y;
v = ctheta * centered_y + stheta * centered_x;
gabor_spatial[y*width + x] = scale * exp(-0.5*(u*u/sx_2 + v*v/sy_2)) * cos(2.0*M_PI*fx*u);
}
}
double* generateGaborKernelDevice(
const int repeat,
const unsigned int height,
const unsigned int width,
const unsigned int par_T,
const double par_L,
const double theta)
{
const double sx = (double)par_T / (2.0*sqrt(2.0*log(2.0)));
const double sy = par_L * sx;
const double sx_2 = sx*sx;
const double sy_2 = sy*sy;
const double fx = 1.0 / (double)par_T;
const double ctheta = cos(theta);
const double stheta = sin(theta);
const double center_y = (double)height / 2.0;
const double center_x = (double)width / 2.0;
const double scale = 1.0/(2.0*M_PI*sx*sy);
size_t image_size_bytes = height * width * sizeof(double);
double *h_gabor_spatial = (double*) malloc (image_size_bytes);
double *d_gabor_spatial;
hipMalloc((void**)&d_gabor_spatial, image_size_bytes);
dim3 grids ((width+15)/16, (height+15)/16);
dim3 blocks (16, 16);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( gabor), dim3(grids), dim3(blocks), 0, 0, d_gabor_spatial,
height,
width,
center_y,
center_x,
ctheta,
stheta,
scale,
sx_2,
sy_2,
fx);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / repeat);
hipMemcpy(h_gabor_spatial, d_gabor_spatial, image_size_bytes, hipMemcpyDeviceToHost);
hipFree(d_gabor_spatial);
return h_gabor_spatial;
}
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Usage: %s <height> <width> <repeat>\n", argv[0]);
return 1;
}
const int height = atoi(argv[1]);
const int width = atoi(argv[2]);
const int repeat = atoi(argv[3]);
const unsigned int par_T = 13;
const double par_L = 2.65;
const double theta = 45;
double *h_filter = generateGaborKernelHost(height, width, par_T, par_L, theta);
double *d_filter = generateGaborKernelDevice(repeat, height, width, par_T, par_L, theta);
bool ok = true;
for (int i = 0; i < width * height; i++) {
if (fabs(h_filter[i] - d_filter[i]) > 1e-3) {
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
free(h_filter);
free(d_filter);
}
| 8aded522adfdc59f08d9a7c2501cc2aa37f699ee.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda.h>
#include "reference.h"
__global__
void gabor (
double *gabor_spatial,
const unsigned int height,
const unsigned int width,
const double center_y,
const double center_x,
const double ctheta,
const double stheta,
const double scale,
const double sx_2,
const double sy_2,
const double fx)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
double centered_x, centered_y, u, v;
if (x < width && y < height) {
centered_y = (double)y - center_y;
centered_x = (double)x - center_x;
u = ctheta * centered_x - stheta * centered_y;
v = ctheta * centered_y + stheta * centered_x;
gabor_spatial[y*width + x] = scale * exp(-0.5*(u*u/sx_2 + v*v/sy_2)) * cos(2.0*M_PI*fx*u);
}
}
// Builds the Gabor filter on the GPU and times the kernel over `repeat`
// launches. Returns a freshly malloc'ed host buffer of height*width doubles
// holding the result; the caller owns it and must free() it.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here,
// so device failures would go unnoticed.
double* generateGaborKernelDevice(
  const int repeat,
  const unsigned int height,
  const unsigned int width,
  const unsigned int par_T,
  const double par_L,
  const double theta)
{
  // Derived filter parameters (same math as the host reference):
  // sx relates the Gaussian width to par_T via the FWHM factor 2*sqrt(2*ln 2).
  const double sx = (double)par_T / (2.0*sqrt(2.0*log(2.0)));
  const double sy = par_L * sx;
  const double sx_2 = sx*sx;
  const double sy_2 = sy*sy;
  const double fx = 1.0 / (double)par_T;
  // theta is passed straight to cos/sin, i.e. it is treated as radians here.
  const double ctheta = cos(theta);
  const double stheta = sin(theta);
  const double center_y = (double)height / 2.0;
  const double center_x = (double)width / 2.0;
  const double scale = 1.0/(2.0*M_PI*sx*sy);

  size_t image_size_bytes = height * width * sizeof(double);
  double *h_gabor_spatial = (double*) malloc (image_size_bytes);
  double *d_gabor_spatial;
  cudaMalloc((void**)&d_gabor_spatial, image_size_bytes);

  // One thread per pixel in 16x16 blocks; grid rounded up to cover the image.
  dim3 grids ((width+15)/16, (height+15)/16);
  dim3 blocks (16, 16);

  // Drain pending work before starting the wall-clock timer so it is not
  // attributed to the measured kernel launches.
  cudaDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();

  for (int i = 0; i < repeat; i++) {
    gabor<<<grids, blocks>>>(d_gabor_spatial,
                             height,
                             width,
                             center_y,
                             center_x,
                             ctheta,
                             stheta,
                             scale,
                             sx_2,
                             sy_2,
                             fx);
  }

  cudaDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  // `time` is in nanoseconds; * 1e-3 converts to microseconds as printed.
  printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / repeat);

  cudaMemcpy(h_gabor_spatial, d_gabor_spatial, image_size_bytes, cudaMemcpyDeviceToHost);
  cudaFree(d_gabor_spatial);
  return h_gabor_spatial;
}
// Entry point: builds the Gabor filter on both host and device and compares
// the two results element-wise with an absolute tolerance of 1e-3.
int main(int argc, char* argv[]) {
  if (argc != 4) {
    printf("Usage: %s <height> <width> <repeat>\n", argv[0]);
    return 1;
  }
  const int height = atoi(argv[1]);
  const int width = atoi(argv[2]);
  const int repeat = atoi(argv[3]);

  // Filter parameters fed identically to both implementations.
  const unsigned int par_T = 13;
  const double par_L = 2.65;
  const double theta = 45;

  double *h_filter = generateGaborKernelHost(height, width, par_T, par_L, theta);
  double *d_filter = generateGaborKernelDevice(repeat, height, width, par_T, par_L, theta);

  // Stop at the first element whose absolute difference exceeds the tolerance.
  bool ok = true;
  for (int i = 0; i < width * height; i++) {
    ok = fabs(h_filter[i] - d_filter[i]) <= 1e-3;
    if (!ok) break;
  }
  printf("%s\n", ok ? "PASS" : "FAIL");

  free(h_filter);
  free(d_filter);
}
|
c2a004b1eb4aa953cb17ede08435bd0b95203fa9.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <cmath>
#include <random>
#include <type_traits>
#ifdef K2_WITH_CUDA
#include "hiprand/hiprand.h" // NOLINT
#include "hiprand/hiprand_kernel.h" // NOLINT
#endif
#include "k2/csrc/rand.h"
namespace k2 {
namespace {
// when calling hiprand_init() in kernels, its arguments
// seed and offset are from this struct. All kernels
// share the same seed and offset.
struct CudaRandState {
// the default value for seed is from
// https://github.com/pytorch/pytorch/blob/master/c10/core/GeneratorImpl.h#L56
//
// It has a good distribution of 0s and 1s in bit representation.
uint64_t seed = 67280421310721u;
uint64_t offset = 0;
};
struct CpuRandState {
uint64_t seed = std::mt19937::default_seed;
std::mt19937 generator;
};
// Returns the per-device GPU RNG state (seed + offset) for the device that
// `context` lives on. The states are function-local statics shared by all
// callers; kMaxNumGpus bounds the device id.
static CudaRandState &GetCudaRandState(ContextPtr context) {
  int32_t device_id = context->GetDeviceId();
  K2_CHECK_LT(device_id, kMaxNumGpus);
  static CudaRandState rand_states[kMaxNumGpus];
  return rand_states[device_id];
}  // NOTE(review): this brace closes the function, not the anonymous
   // namespace (the original "// namespace" comment here was misleading;
   // the namespace is closed further below).
static CpuRandState &GetCpuRandState() {
static thread_local CpuRandState state;
return state;
}
template <typename T, typename Distribution>
static void RandCpu(int32_t dim, T low, T high, T *out) {
Distribution distribution(low, high);
auto &generator = GetCpuRandState().generator;
for (int32_t i = 0; i != dim; ++i) {
out[i] = distribution(generator);
}
}
} // namespace
uint64_t GetSeed(ContextPtr context) {
DeviceType device_type = context->GetDeviceType();
if (device_type == kCuda) return GetCudaRandState(context).seed;
K2_CHECK_EQ(device_type, kCpu);
return GetCpuRandState().seed;
}
void SetSeed(ContextPtr context, uint64_t seed) {
DeviceType device_type = context->GetDeviceType();
if (device_type == kCuda) {
// TODO(fangjun): we may need a lock here
CudaRandState &state = GetCudaRandState(context);
state.seed = seed;
state.offset = 0;
return;
}
K2_CHECK_EQ(device_type, kCpu);
CpuRandState &state = GetCpuRandState();
state.seed = seed;
state.generator.seed(seed);
}
template <>
void Rand<float>(ContextPtr context, float low, float high, int32_t dim,
float *array_data) {
K2_CHECK_LT(low, high);
if (dim == 0) return;
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) {
RandCpu<float, std::uniform_real_distribution<float>>(dim, low, high,
array_data);
return;
}
K2_CHECK_EQ(device_type, kCuda);
#ifdef K2_WITH_CUDA
CudaRandState &state = GetCudaRandState(context);
float range = high - low;
auto generate_rand_lambda_float = [=] __device__(int32_t i) {
hiprandStatePhilox4_32_10_t philox_state;
hiprand_init(state.seed,
i, // sequence
state.offset, &philox_state);
float4 r = hiprand_uniform4(&philox_state);
// hiprand_uniform4() returns a number in (0, 1],
// we want to transform it to [0, 1)
//
// CAUTION: `1 - r.x` is not used here as it may be rounded up to 1
// when `r.x` is close to 0
float t = (r.x == 1.0f) ? 0.0f : r.x;
array_data[i] = t * range + low;
};
EvalDevice(context, dim, generate_rand_lambda_float);
state.offset += 4;
#else
K2_LOG(FATAL) << "Unreachable code";
#endif
}
template <>
void Rand<double>(ContextPtr context, double low, double high, int32_t dim,
double *array_data) {
K2_CHECK_LT(low, high);
if (dim == 0) return;
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) {
RandCpu<double, std::uniform_real_distribution<double>>(dim, low, high,
array_data);
return;
}
#ifdef K2_WITH_CUDA
K2_CHECK_EQ(device_type, kCuda);
CudaRandState &state = GetCudaRandState(context);
double range = high - low;
auto generate_rand_lambda_double = [=] __device__(int32_t i) {
hiprandStatePhilox4_32_10_t philox_state;
hiprand_init(state.seed,
i, // sequence
state.offset, &philox_state);
double2 r = hiprand_uniform2_double(&philox_state);
double t = (r.x == 1.0) ? 0.0 : r.x;
array_data[i] = t * range + low;
};
EvalDevice(context, dim, generate_rand_lambda_double);
state.offset += 4;
#else
K2_LOG(FATAL) << "Unreachable code.";
#endif
}
template <>
void Rand<int32_t>(ContextPtr context, int32_t low, int32_t high, int32_t dim,
int32_t *array_data) {
K2_CHECK_LT(low, high);
if (dim == 0) return;
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) {
RandCpu<int32_t, std::uniform_int_distribution<int32_t>>(
dim, low, high - 1, // -1 since high is to be excluded
array_data);
return;
}
#ifdef K2_WITH_CUDA
K2_CHECK_EQ(device_type, kCuda);
CudaRandState &state = GetCudaRandState(context);
uint32_t range = high - low;
auto generate_rand_lambda_double = [=] __device__(int32_t i) {
hiprandStatePhilox4_32_10_t philox_state;
hiprand_init(state.seed,
i, // sequence
state.offset, &philox_state);
uint4 r = hiprand4(&philox_state);
int32_t t = static_cast<int32_t>(r.x % range + low);
array_data[i] = t;
};
EvalDevice(context, dim, generate_rand_lambda_double);
state.offset += 4;
#else
K2_LOG(FATAL) << "Unreachable code.";
#endif
}
} // namespace k2
| c2a004b1eb4aa953cb17ede08435bd0b95203fa9.cu | /**
* Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <cmath>
#include <random>
#include <type_traits>
#ifdef K2_WITH_CUDA
#include "curand.h" // NOLINT
#include "curand_kernel.h" // NOLINT
#endif
#include "k2/csrc/rand.h"
namespace k2 {
namespace {
// when calling curand_init() in kernels, its arguments
// seed and offset are from this struct. All kernels
// share the same seed and offset.
struct CudaRandState {
// the default value for seed is from
// https://github.com/pytorch/pytorch/blob/master/c10/core/GeneratorImpl.h#L56
//
// It has a good distribution of 0s and 1s in bit representation.
uint64_t seed = 67280421310721u;
uint64_t offset = 0;
};
struct CpuRandState {
uint64_t seed = std::mt19937::default_seed;
std::mt19937 generator;
};
// Returns the per-device CUDA RNG state (seed + offset) for the device that
// `context` lives on. The states are function-local statics shared by all
// callers; kMaxNumGpus bounds the device id.
static CudaRandState &GetCudaRandState(ContextPtr context) {
  int32_t device_id = context->GetDeviceId();
  K2_CHECK_LT(device_id, kMaxNumGpus);
  static CudaRandState rand_states[kMaxNumGpus];
  return rand_states[device_id];
}  // NOTE(review): this brace closes the function, not the anonymous
   // namespace (the original "// namespace" comment here was misleading;
   // the namespace is closed further below).
// Returns the calling thread's CPU RNG state (seed + mt19937 generator);
// each thread gets its own independent state.
static CpuRandState &GetCpuRandState() {
  static thread_local CpuRandState state;
  return state;
}
// Fills out[0..dim) with values drawn from Distribution(low, high) using the
// calling thread's CPU generator.
template <typename T, typename Distribution>
static void RandCpu(int32_t dim, T low, T high, T *out) {
  auto &rng = GetCpuRandState().generator;
  Distribution dist(low, high);
  for (int32_t k = 0; k < dim; ++k) out[k] = dist(rng);
}
} // namespace
// Returns the current RNG seed for the device `context` lives on:
// the per-device shared state for CUDA, the thread-local state for CPU.
uint64_t GetSeed(ContextPtr context) {
  DeviceType device_type = context->GetDeviceType();
  if (device_type == kCuda) return GetCudaRandState(context).seed;
  K2_CHECK_EQ(device_type, kCpu);
  return GetCpuRandState().seed;
}
// Sets the RNG seed for the device `context` lives on. For CUDA this resets
// the shared per-device state (seed and offset); for CPU it reseeds the
// calling thread's mt19937 generator.
void SetSeed(ContextPtr context, uint64_t seed) {
  DeviceType device_type = context->GetDeviceType();
  if (device_type == kCuda) {
    // TODO(fangjun): we may need a lock here
    CudaRandState &state = GetCudaRandState(context);
    state.seed = seed;
    // Restart the counter sequence so results are reproducible from the seed.
    state.offset = 0;
    return;
  }
  K2_CHECK_EQ(device_type, kCpu);
  CpuRandState &state = GetCpuRandState();
  state.seed = seed;
  state.generator.seed(seed);
}
// Fills array_data with `dim` floats uniformly drawn from [low, high).
// CPU path: std::uniform_real_distribution on the thread-local generator.
// CUDA path: Philox4x32-10 counter-based RNG, one sub-sequence per element.
template <>
void Rand<float>(ContextPtr context, float low, float high, int32_t dim,
                 float *array_data) {
  K2_CHECK_LT(low, high);
  if (dim == 0) return;
  DeviceType device_type = context->GetDeviceType();
  if (device_type == kCpu) {
    RandCpu<float, std::uniform_real_distribution<float>>(dim, low, high,
                                                          array_data);
    return;
  }
  K2_CHECK_EQ(device_type, kCuda);
#ifdef K2_WITH_CUDA
  CudaRandState &state = GetCudaRandState(context);
  float range = high - low;
  auto generate_rand_lambda_float = [=] __device__(int32_t i) {
    curandStatePhilox4_32_10_t philox_state;
    curand_init(state.seed,
                i,  // sequence
                state.offset, &philox_state);
    // Only r.x of the four generated values is consumed per element.
    float4 r = curand_uniform4(&philox_state);
    // curand_uniform4() returns a number in (0, 1],
    // we want to transform it to [0, 1)
    //
    // CAUTION: `1 - r.x` is not used here as it may be rounded up to 1
    // when `r.x` is close to 0
    float t = (r.x == 1.0f) ? 0.0f : r.x;
    array_data[i] = t * range + low;
  };
  EvalDevice(context, dim, generate_rand_lambda_float);
  // Advance the shared offset so subsequent calls draw fresh counter values.
  state.offset += 4;
#else
  K2_LOG(FATAL) << "Unreachable code";
#endif
}
// Fills array_data with `dim` doubles uniformly drawn from [low, high).
// CPU path: std::uniform_real_distribution on the thread-local generator.
// CUDA path: Philox4x32-10 counter-based RNG, one sub-sequence per element.
template <>
void Rand<double>(ContextPtr context, double low, double high, int32_t dim,
                  double *array_data) {
  K2_CHECK_LT(low, high);
  if (dim == 0) return;
  DeviceType device_type = context->GetDeviceType();
  if (device_type == kCpu) {
    RandCpu<double, std::uniform_real_distribution<double>>(dim, low, high,
                                                            array_data);
    return;
  }
#ifdef K2_WITH_CUDA
  K2_CHECK_EQ(device_type, kCuda);
  CudaRandState &state = GetCudaRandState(context);
  double range = high - low;
  auto generate_rand_lambda_double = [=] __device__(int32_t i) {
    curandStatePhilox4_32_10_t philox_state;
    curand_init(state.seed,
                i,  // sequence
                state.offset, &philox_state);
    // Only r.x of the generated pair is consumed per element.
    double2 r = curand_uniform2_double(&philox_state);
    // curand_uniform2_double() yields values in (0, 1]; map 1.0 -> 0.0 so the
    // result lies in [0, 1) without the rounding hazard of computing 1 - r.x.
    double t = (r.x == 1.0) ? 0.0 : r.x;
    array_data[i] = t * range + low;
  };
  EvalDevice(context, dim, generate_rand_lambda_double);
  // Advance the shared offset so subsequent calls draw fresh counter values.
  state.offset += 4;
#else
  K2_LOG(FATAL) << "Unreachable code.";
#endif
}
// Fills array_data with `dim` random int32 values uniformly drawn from
// [low, high) — high is exclusive — on either CPU or CUDA depending on
// the context.
template <>
void Rand<int32_t>(ContextPtr context, int32_t low, int32_t high, int32_t dim,
                   int32_t *array_data) {
  K2_CHECK_LT(low, high);
  if (dim == 0) return;
  DeviceType device_type = context->GetDeviceType();
  if (device_type == kCpu) {
    RandCpu<int32_t, std::uniform_int_distribution<int32_t>>(
        dim, low, high - 1,  // -1 since high is to be excluded
        array_data);
    return;
  }
#ifdef K2_WITH_CUDA
  K2_CHECK_EQ(device_type, kCuda);
  CudaRandState &state = GetCudaRandState(context);
  uint32_t range = high - low;
  // One Philox sub-sequence per element; only r.x of the four generated
  // words is used. NOTE: `r.x % range` carries a slight modulo bias when
  // range does not divide 2^32; kept as-is for backward compatibility.
  // (Lambda renamed from the copy-pasted "generate_rand_lambda_double" —
  // this specialization generates int32 values.)
  auto generate_rand_lambda_int32 = [=] __device__(int32_t i) {
    curandStatePhilox4_32_10_t philox_state;
    curand_init(state.seed,
                i,  // sequence
                state.offset, &philox_state);
    uint4 r = curand4(&philox_state);
    int32_t t = static_cast<int32_t>(r.x % range + low);
    array_data[i] = t;
  };
  EvalDevice(context, dim, generate_rand_lambda_int32);
  // Advance the shared offset so subsequent calls draw fresh counter values.
  state.offset += 4;
#else
  K2_LOG(FATAL) << "Unreachable code.";
#endif
}
|
ac1528b883106cca40580b80f0cf2e967f0da7f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <vector>
#include <hip/hip_runtime.h>
// Example
// https://pytorch.org/docs/stable/generated/torch.flip.html
template <typename scalar_t>
__global__ void flip_kernel(
const scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t n,
const int64_t* flip_dims,
const int64_t flip_dims_size,
const int64_t* strides,
const int64_t* strides_contiguous,
const int64_t* shape,
const int64_t total_dims)
{
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= n) return;
int64_t cur_indices = linear_index;
int64_t rem = 0;
int64_t dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
for (int64_t j = 0; j < flip_dims_size; j++) {
// flip the indices if it is in flip_dims
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// display the values of a property in a tensor
// Prints a named list of tensor-property values, e.g. "shape: ( 2 3 4 )".
void property (const char* name, std::vector<int64_t> p)
{
  printf("%s: ( ", name);
  for (uint64_t i = 0; i < p.size(); i++) {
    // %lld with an explicit cast portably prints a signed 64-bit value;
    // the previous "%lu" mismatched int64_t's signedness (and its width
    // on LLP64 platforms).
    printf("%lld ", (long long)p[i]);
  }
  printf(")\n");
}
template <typename scalar_t>
void flip (const int64_t num_dims, const int64_t num_flip_dims,
const int32_t dim_size, const int32_t repeat)
{
std::vector<int64_t> flip;
std::vector<int64_t> shape;
std::vector<int64_t> stride;
for (int64_t i = 0; i < num_dims; i++) {
#ifdef EXAMPLE
shape.push_back(2);
#else
shape.push_back(dim_size);
#endif
}
int64_t n = 1;
for (int64_t i = 0; i < num_dims; i++) {
n = n * shape[i];
}
for (int64_t i = 0; i < num_flip_dims; i++) {
flip.push_back(i);
}
stride.push_back(shape[1] * shape[2]);
stride.push_back(shape[2]);
stride.push_back(1);
property("shape", shape);
property("flip_dims", flip);
property("stride", stride);
int64_t dims_bytes = num_dims * sizeof(int64_t);
int64_t flip_dims_bytes = num_flip_dims * sizeof(int64_t);
int64_t input_size_bytes = n * sizeof(scalar_t);
int64_t output_size_bytes = input_size_bytes;
scalar_t *input = (scalar_t*) malloc (input_size_bytes);
for (int i = 0; i < n; i++) {
input[i] = (scalar_t) i;
}
scalar_t *output = (scalar_t*) malloc(output_size_bytes);
scalar_t *d_input, *d_output;
hipMalloc((void**)&d_input, input_size_bytes);
hipMemcpy(d_input, input, input_size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_output, output_size_bytes);
int64_t *d_flip_dims, *d_shape, *d_strides, *d_strides_contiguous;
hipMalloc((void**)&d_flip_dims, flip_dims_bytes);
hipMemcpy(d_flip_dims, flip.data(), flip_dims_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_shape, dims_bytes);
hipMemcpy(d_shape, shape.data(), dims_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_strides, dims_bytes);
hipMemcpy(d_strides, stride.data(), dims_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_strides_contiguous, dims_bytes);
hipMemcpy(d_strides_contiguous, stride.data(), dims_bytes, hipMemcpyHostToDevice);
const int threadsPerBlock = 256;
dim3 grid ((n + threadsPerBlock - 1) / threadsPerBlock);
dim3 block (threadsPerBlock);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( flip_kernel<scalar_t>), dim3(grid), dim3(block), 0, 0,
d_input,
d_output,
n,
d_flip_dims,
num_flip_dims,
d_strides,
d_strides_contiguous,
d_shape,
num_dims);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of the flip kernel: %f (ms)\n", (time * 1e-6f) / repeat);
hipMemcpy(output, d_output, output_size_bytes, hipMemcpyDeviceToHost);
#ifdef EXAMPLE
for (int i = 0; i < n; i++) {
printf("%f ", output[i]);
}
printf("\n");
#endif
free(input);
free(output);
hipFree(d_input);
hipFree(d_output);
hipFree(d_flip_dims);
hipFree(d_shape);
hipFree(d_strides);
hipFree(d_strides_contiguous);
}
int main(int argc, char* argv[])
{
if (argc != 4) {
printf("Usage: %s <number of dimensions> <size of each dimension> <repeat>\n", argv[0]);
return 1;
}
const int64_t num_dims = atoi(argv[1]);
const int64_t dim_size = atoi(argv[2]);
const int32_t repeat = atoi(argv[3]);
#ifdef EXAMPLE
const int64_t num_flip_dims = 2;
#else
const int64_t num_flip_dims = num_dims;
#endif
printf("=========== Data type is FP32 ==========\n");
flip<float>(num_dims, num_flip_dims, dim_size, repeat);
printf("=========== Data type is FP64 ==========\n");
flip<double>(num_dims, num_flip_dims, dim_size, repeat);
return 0;
}
| ac1528b883106cca40580b80f0cf2e967f0da7f2.cu | #include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <vector>
#include <hip/hip_runtime.h>
// Example
// https://pytorch.org/docs/stable/generated/torch.flip.html
// Reverses (flips) `in_tensor` along the dimensions listed in flip_dims,
// writing the result to out_tensor. One thread per output element: the flat
// output index is decoded into per-dimension coordinates using the
// contiguous strides, coordinates of flipped dimensions are mirrored
// (shape[i] - 1 - c), and the source offset is re-encoded with the input
// strides. Launch with a 1-D grid covering at least n threads.
template <typename scalar_t>
__global__ void flip_kernel(
  const scalar_t* in_tensor,
  scalar_t* out_tensor,
  int64_t n,
  const int64_t* flip_dims,
  const int64_t flip_dims_size,
  const int64_t* strides,
  const int64_t* strides_contiguous,
  const int64_t* shape,
  const int64_t total_dims)
{
  int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (linear_index >= n) return;
  int64_t cur_indices = linear_index;
  int64_t rem = 0;
  int64_t dst_offset = 0;
  for (int64_t i = 0; i < total_dims; i++) {
    // Split the running linear index into (coordinate for dim i, remainder).
    int64_t temp = cur_indices;
    cur_indices = cur_indices / strides_contiguous[i];
    rem = temp - cur_indices * strides_contiguous[i];
    for (int64_t j = 0; j < flip_dims_size; j++) {
      // flip the indices if it is in flip_dims
      if (i == flip_dims[j]) {
        cur_indices = shape[i] - 1 - cur_indices;
      }
    }
    dst_offset += cur_indices * strides[i];
    cur_indices = rem;
  }
  out_tensor[linear_index] = in_tensor[dst_offset];
}
// display the values of a property in a tensor
// Prints a named list of tensor-property values, e.g. "shape: ( 2 3 4 )".
void property (const char* name, std::vector<int64_t> p)
{
  printf("%s: ( ", name);
  for (uint64_t i = 0; i < p.size(); i++) {
    // %lld with an explicit cast portably prints a signed 64-bit value;
    // the previous "%lu" mismatched int64_t's signedness (and its width
    // on LLP64 platforms).
    printf("%lld ", (long long)p[i]);
  }
  printf(")\n");
}
// Benchmarks flip_kernel on a num_dims-dimensional tensor whose every
// dimension has dim_size elements (2 when EXAMPLE is defined), flipping the
// first num_flip_dims dimensions `repeat` times and reporting the average
// kernel time. scalar_t selects the element type (float / double).
template <typename scalar_t>
void flip (const int64_t num_dims, const int64_t num_flip_dims,
           const int32_t dim_size, const int32_t repeat)
{
  std::vector<int64_t> flip;
  std::vector<int64_t> shape;
  std::vector<int64_t> stride;

  for (int64_t i = 0; i < num_dims; i++) {
#ifdef EXAMPLE
    shape.push_back(2);
#else
    shape.push_back(dim_size);
#endif
  }

  // Total number of elements.
  int64_t n = 1;
  for (int64_t i = 0; i < num_dims; i++) {
    n = n * shape[i];
  }

  // Flip the leading num_flip_dims dimensions.
  for (int64_t i = 0; i < num_flip_dims; i++) {
    flip.push_back(i);
  }

  // Row-major (C-contiguous) strides, computed for any rank. The previous
  // version hard-coded the 3-D case (shape[1]*shape[2], shape[2], 1), which
  // read out of bounds for num_dims < 3 and produced wrong strides for
  // num_dims > 3.
  stride.resize(num_dims);
  int64_t acc = 1;
  for (int64_t i = num_dims - 1; i >= 0; i--) {
    stride[i] = acc;
    acc *= shape[i];
  }

  property("shape", shape);
  property("flip_dims", flip);
  property("stride", stride);

  int64_t dims_bytes = num_dims * sizeof(int64_t);
  int64_t flip_dims_bytes = num_flip_dims * sizeof(int64_t);
  int64_t input_size_bytes = n * sizeof(scalar_t);
  int64_t output_size_bytes = input_size_bytes;

  // Host input is filled with 0, 1, 2, ... so each element encodes its index.
  scalar_t *input = (scalar_t*) malloc (input_size_bytes);
  for (int i = 0; i < n; i++) {
    input[i] = (scalar_t) i;
  }
  scalar_t *output = (scalar_t*) malloc(output_size_bytes);

  scalar_t *d_input, *d_output;
  hipMalloc((void**)&d_input, input_size_bytes);
  hipMemcpy(d_input, input, input_size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_output, output_size_bytes);

  int64_t *d_flip_dims, *d_shape, *d_strides, *d_strides_contiguous;
  hipMalloc((void**)&d_flip_dims, flip_dims_bytes);
  hipMemcpy(d_flip_dims, flip.data(), flip_dims_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_shape, dims_bytes);
  hipMemcpy(d_shape, shape.data(), dims_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_strides, dims_bytes);
  hipMemcpy(d_strides, stride.data(), dims_bytes, hipMemcpyHostToDevice);
  // The input is contiguous, so the "contiguous" strides used to decode the
  // linear index are identical to the tensor's own strides.
  hipMalloc((void**)&d_strides_contiguous, dims_bytes);
  hipMemcpy(d_strides_contiguous, stride.data(), dims_bytes, hipMemcpyHostToDevice);

  const int threadsPerBlock = 256;
  dim3 grid ((n + threadsPerBlock - 1) / threadsPerBlock);
  dim3 block (threadsPerBlock);

  // Synchronize before timing so setup work is not attributed to the kernel.
  hipDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();

  for (int i = 0; i < repeat; i++) {
    flip_kernel<scalar_t><<<grid, block>>> (
      d_input,
      d_output,
      n,
      d_flip_dims,
      num_flip_dims,
      d_strides,
      d_strides_contiguous,
      d_shape,
      num_dims);
  }

  hipDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of the flip kernel: %f (ms)\n", (time * 1e-6f) / repeat);

  hipMemcpy(output, d_output, output_size_bytes, hipMemcpyDeviceToHost);

#ifdef EXAMPLE
  for (int i = 0; i < n; i++) {
    printf("%f ", output[i]);
  }
  printf("\n");
#endif

  free(input);
  free(output);
  hipFree(d_input);
  hipFree(d_output);
  hipFree(d_flip_dims);
  hipFree(d_shape);
  hipFree(d_strides);
  hipFree(d_strides_contiguous);
}
// Entry point: parses <number of dimensions> <size of each dimension>
// <repeat> and benchmarks the flip kernel for float and double elements.
int main(int argc, char* argv[])
{
  if (argc != 4) {
    printf("Usage: %s <number of dimensions> <size of each dimension> <repeat>\n", argv[0]);
    return 1;
  }
  const int64_t num_dims = atoi(argv[1]);
  const int64_t dim_size = atoi(argv[2]);
  const int32_t repeat = atoi(argv[3]);
#ifdef EXAMPLE
  // The torch.flip example referenced at the top of the file flips only the
  // first two dimensions.
  const int64_t num_flip_dims = 2;
#else
  const int64_t num_flip_dims = num_dims;
#endif
  printf("=========== Data type is FP32 ==========\n");
  flip<float>(num_dims, num_flip_dims, dim_size, repeat);
  printf("=========== Data type is FP64 ==========\n");
  flip<double>(num_dims, num_flip_dims, dim_size, repeat);
  return 0;
}
|
e1b6ea2e741a953d1da6d37940a2e88116e7a704.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define threshold 5 //(50% probability)
#define block_size 256
__global__ void calculation( char* dev_a,
char* dev_b,
char* dev_c,
int num_matrices,
int matrix_size ) {
// Each thread handles a matrix
int k = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id
if (k >= num_matrices) return;
// If first element is different than 0 do the computation
if (dev_a[k*matrix_size*matrix_size] != 0){
for (int j = 0; j < matrix_size; j++){
//If first value in the row of the matrix, do addition
if (dev_a[k*matrix_size*matrix_size+j*matrix_size] < threshold){
for (int i = 0; i < matrix_size; i++){
int index = k*matrix_size*matrix_size+j*matrix_size+i;
dev_c[index] = dev_a[index] + dev_b[index];
}
//Do subtraction
} else {
for (int i = 0; i < matrix_size; i++){
int index = k*matrix_size*matrix_size+j*matrix_size+i;
dev_c[index] = dev_a[index] - dev_b[index];
}
}
}
}
}
int main( int argc, char* argv[] ) {
// Parse Input arguments
// Check the number of arguments (we only receive command + vector size)
if (argc != 3) {
// Tell the user how to run the program
printf ("Usage:\n%s <number of matrices> <matrix_size>\n", argv[0]);
// "Usage messages" are a conventional way of telling the user
// how to run a program if they enter the command incorrectly.
return -1;
}
srand ( time(NULL) );
// Set variables with input arguments
int num_matrices = atoi(argv[1]);
int matrix_size = atoi(argv[2]);
// Set device that we will use for our cuda code
hipSetDevice(0);
// Time Variables
hipEvent_t stp_start, stp_stop;
hipEvent_t cpu_start, cpu_stop;
hipEvent_t gpu_start, gpu_stop;
hipEvent_t ker_start, ker_stop;
hipEventCreate (&stp_start);
hipEventCreate (&stp_stop);
hipEventCreate (&cpu_start);
hipEventCreate (&cpu_stop);
hipEventCreate (&gpu_start);
hipEventCreate (&gpu_stop);
hipEventCreate (&ker_start);
hipEventCreate (&ker_stop);
float time, ker_time;
// Input Arrays and variables
char *a = new char [num_matrices*matrix_size*matrix_size];
char *b = new char [num_matrices*matrix_size*matrix_size];
char *c_cpu = new char [num_matrices*matrix_size*matrix_size];
char *c_gpu = new char [num_matrices*matrix_size*matrix_size];
// Pointers in GPU memory
char *dev_a;
char *dev_b;
char *dev_c;
//
// Fill arrays
//////////////////
hipEventRecord(stp_start,0);
#if defined(_OPENMP)
printf("Setting up input arrays in parallel.\n");
omp_set_num_threads(8);
#else
printf("Setting up input arrays.\n");
#endif
#pragma omp parallel for
for (int k = 0; k < num_matrices; k++) {
#if defined(_OPENMP)
if (k == 0) printf ("Using %d threads.\n", omp_get_num_threads());
#endif
for (int j = 0; j < matrix_size*matrix_size; j++){
a[k*matrix_size*matrix_size + j] = j%9+1;
b[k*matrix_size*matrix_size + j] = j%10;
c_cpu[k*matrix_size*matrix_size + j] = 0;
c_gpu[k*matrix_size*matrix_size + j] = 0;
}
}
hipEventRecord(stp_stop,0);
hipEventSynchronize(stp_stop);
hipEventElapsedTime(&time, stp_start, stp_stop);
printf("\tSetup Time: %.2f ms\n", time);
//
// CPU Calculation
//////////////////
printf("Running sequential job.\n");
hipEventRecord(cpu_start,0);
// Calculate C in the CPU
for (int k = 0; k < num_matrices; k++) {
// If first element is different than 0 do the computation
if (a[k*matrix_size*matrix_size] != 0){
for (int j = 0; j < matrix_size; j++){
//If first value in the row of the matrix, do addition
if (a[k*matrix_size*matrix_size+j*matrix_size] < threshold){
for (int i = 0; i < matrix_size; i++){
int index = k*matrix_size*matrix_size+j*matrix_size+i;
c_cpu[index] = a[index] + b[index];
}
//Do subtraction
} else {
for (int i = 0; i < matrix_size; i++){
int index = k*matrix_size*matrix_size+j*matrix_size+i;
c_cpu[index] = a[index] - b[index];
}
}
}
}
}
hipEventRecord(cpu_stop,0);
hipEventSynchronize(cpu_stop);
hipEventElapsedTime(&time, cpu_start, cpu_stop);
printf("\tSequential Job Time: %.2f ms\n", time);
//
// GPU Calculation
//////////////////
printf("Running parallel job.\n");
int grid_size = ((num_matrices-1)/block_size) + 1;
hipEventRecord(gpu_start,0);
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, num_matrices * matrix_size * matrix_size * sizeof(char) );
hipMalloc( (void**)&dev_b, num_matrices * matrix_size * matrix_size * sizeof(char) );
hipMalloc( (void**)&dev_c, num_matrices * matrix_size * matrix_size * sizeof(char) );
// set arrays to 0
hipMemset(dev_a, 0, num_matrices * matrix_size * matrix_size * sizeof(char));
hipMemset(dev_b, 0, num_matrices * matrix_size * matrix_size * sizeof(char));
hipMemset(dev_c, 0, num_matrices * matrix_size * matrix_size * sizeof(char));
// copy the 'data' to the GPU
hipMemcpy( dev_a, a, num_matrices * matrix_size * matrix_size * sizeof(char), hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, num_matrices * matrix_size * matrix_size * sizeof(char), hipMemcpyHostToDevice );
// run kernel
hipEventRecord(ker_start,0);
hipLaunchKernelGGL(( calculation), dim3(grid_size),dim3(block_size), 0, 0, dev_a,
dev_b,
dev_c,
num_matrices,
matrix_size );
hipEventRecord(ker_stop,0);
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c_gpu, dev_c, num_matrices * matrix_size * matrix_size * sizeof(char), hipMemcpyDeviceToHost );
hipEventRecord(gpu_stop,0);
hipEventSynchronize(gpu_stop);
hipEventElapsedTime(&time , gpu_start, gpu_stop);
hipEventElapsedTime(&ker_time, ker_start, ker_stop);
printf("\tParallel Job Time: %.2f ms\n", time);
printf("\tKernel Exec. Time: %.2f ms\n", ker_time);
//
// Compare Results
//////////////////
int error = 0;
for (int i = 0; i < num_matrices * matrix_size * matrix_size; i++) {
if (c_cpu[i] != c_gpu[i]){
error = 1;
printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
}
if (error) break;
}
if (error == 0){
printf ("Correct result. No errors were found.\n");
}
//
// Free resources
//////////////////
// free the memory allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
// free cuda events
hipEventDestroy (cpu_start);
hipEventDestroy (gpu_start);
hipEventDestroy (ker_start);
hipEventDestroy (cpu_stop);
hipEventDestroy (gpu_stop);
hipEventDestroy (ker_stop);
// free CPU memory
free(a);
free(b);
free(c_cpu);
free(c_gpu);
return 0;
}
| e1b6ea2e741a953d1da6d37940a2e88116e7a704.cu | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define threshold 5 //(50% probability)
#define block_size 256
// One thread per matrix: for matrix k, if its first element in A is
// non-zero, each row j of C is computed as A+B when the row's first element
// in A is below `threshold`, otherwise as A-B. Matrices whose first element
// is zero are skipped (their output keeps its prior contents).
// Launch with a 1-D grid covering at least num_matrices threads.
__global__ void calculation( char* dev_a,
                             char* dev_b,
                             char* dev_c,
                             int num_matrices,
                             int matrix_size ) {
  // Each thread handles a matrix
  int k = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id
  if (k >= num_matrices) return;
  // If first element is different than 0 do the computation
  if (dev_a[k*matrix_size*matrix_size] != 0){
    for (int j = 0; j < matrix_size; j++){
      //If first value in the row of the matrix is below threshold, do addition
      if (dev_a[k*matrix_size*matrix_size+j*matrix_size] < threshold){
        for (int i = 0; i < matrix_size; i++){
          int index = k*matrix_size*matrix_size+j*matrix_size+i;
          dev_c[index] = dev_a[index] + dev_b[index];
        }
      //Do subtraction
      } else {
        for (int i = 0; i < matrix_size; i++){
          int index = k*matrix_size*matrix_size+j*matrix_size+i;
          dev_c[index] = dev_a[index] - dev_b[index];
        }
      }
    }
  }
}
// Driver: builds num_matrices matrices of matrix_size x matrix_size chars,
// computes the conditional add/subtract result on the CPU and the GPU,
// compares the two, and reports setup / sequential / parallel / kernel times.
// Fixes over the previous revision: host arrays allocated with new[] are now
// released with delete[] (free() on a new[] pointer is undefined behavior),
// and the setup-timing events are destroyed like the others.
int main( int argc, char* argv[] ) {
  // Parse Input arguments
  // Check the number of arguments (we only receive command + vector size)
  if (argc != 3) {
    // Tell the user how to run the program
    printf ("Usage:\n%s <number of matrices> <matrix_size>\n", argv[0]);
    // "Usage messages" are a conventional way of telling the user
    // how to run a program if they enter the command incorrectly.
    return -1;
  }
  srand ( time(NULL) );
  // Set variables with input arguments
  int num_matrices = atoi(argv[1]);
  int matrix_size = atoi(argv[2]);
  // Set device that we will use for our cuda code
  cudaSetDevice(0);

  // Time Variables
  cudaEvent_t stp_start, stp_stop;
  cudaEvent_t cpu_start, cpu_stop;
  cudaEvent_t gpu_start, gpu_stop;
  cudaEvent_t ker_start, ker_stop;
  cudaEventCreate (&stp_start);
  cudaEventCreate (&stp_stop);
  cudaEventCreate (&cpu_start);
  cudaEventCreate (&cpu_stop);
  cudaEventCreate (&gpu_start);
  cudaEventCreate (&gpu_stop);
  cudaEventCreate (&ker_start);
  cudaEventCreate (&ker_stop);
  float time, ker_time;

  // Input Arrays and variables
  // NOTE(review): num_matrices*matrix_size*matrix_size is computed in int and
  // can overflow for very large inputs — confirm the expected input ranges.
  char *a = new char [num_matrices*matrix_size*matrix_size];
  char *b = new char [num_matrices*matrix_size*matrix_size];
  char *c_cpu = new char [num_matrices*matrix_size*matrix_size];
  char *c_gpu = new char [num_matrices*matrix_size*matrix_size];
  // Pointers in GPU memory
  char *dev_a;
  char *dev_b;
  char *dev_c;

  //
  // Fill arrays
  //////////////////
  cudaEventRecord(stp_start,0);
#if defined(_OPENMP)
  printf("Setting up input arrays in parallel.\n");
  omp_set_num_threads(8);
#else
  printf("Setting up input arrays.\n");
#endif
#pragma omp parallel for
  for (int k = 0; k < num_matrices; k++) {
#if defined(_OPENMP)
    if (k == 0) printf ("Using %d threads.\n", omp_get_num_threads());
#endif
    for (int j = 0; j < matrix_size*matrix_size; j++){
      a[k*matrix_size*matrix_size + j] = j%9+1;
      b[k*matrix_size*matrix_size + j] = j%10;
      c_cpu[k*matrix_size*matrix_size + j] = 0;
      c_gpu[k*matrix_size*matrix_size + j] = 0;
    }
  }
  cudaEventRecord(stp_stop,0);
  cudaEventSynchronize(stp_stop);
  cudaEventElapsedTime(&time, stp_start, stp_stop);
  printf("\tSetup Time: %.2f ms\n", time);

  //
  // CPU Calculation
  //////////////////
  printf("Running sequential job.\n");
  cudaEventRecord(cpu_start,0);
  // Calculate C in the CPU (reference result; mirrors the kernel's logic)
  for (int k = 0; k < num_matrices; k++) {
    // If first element is different than 0 do the computation
    if (a[k*matrix_size*matrix_size] != 0){
      for (int j = 0; j < matrix_size; j++){
        //If first value in the row of the matrix is below threshold, do addition
        if (a[k*matrix_size*matrix_size+j*matrix_size] < threshold){
          for (int i = 0; i < matrix_size; i++){
            int index = k*matrix_size*matrix_size+j*matrix_size+i;
            c_cpu[index] = a[index] + b[index];
          }
        //Do subtraction
        } else {
          for (int i = 0; i < matrix_size; i++){
            int index = k*matrix_size*matrix_size+j*matrix_size+i;
            c_cpu[index] = a[index] - b[index];
          }
        }
      }
    }
  }
  cudaEventRecord(cpu_stop,0);
  cudaEventSynchronize(cpu_stop);
  cudaEventElapsedTime(&time, cpu_start, cpu_stop);
  printf("\tSequential Job Time: %.2f ms\n", time);

  //
  // GPU Calculation
  //////////////////
  printf("Running parallel job.\n");
  int grid_size = ((num_matrices-1)/block_size) + 1;  // ceil-div: one thread per matrix
  cudaEventRecord(gpu_start,0);
  // allocate the memory on the GPU
  cudaMalloc( (void**)&dev_a, num_matrices * matrix_size * matrix_size * sizeof(char) );
  cudaMalloc( (void**)&dev_b, num_matrices * matrix_size * matrix_size * sizeof(char) );
  cudaMalloc( (void**)&dev_c, num_matrices * matrix_size * matrix_size * sizeof(char) );
  // set arrays to 0
  cudaMemset(dev_a, 0, num_matrices * matrix_size * matrix_size * sizeof(char));
  cudaMemset(dev_b, 0, num_matrices * matrix_size * matrix_size * sizeof(char));
  cudaMemset(dev_c, 0, num_matrices * matrix_size * matrix_size * sizeof(char));
  // copy the 'data' to the GPU
  cudaMemcpy( dev_a, a, num_matrices * matrix_size * matrix_size * sizeof(char), cudaMemcpyHostToDevice );
  cudaMemcpy( dev_b, b, num_matrices * matrix_size * matrix_size * sizeof(char), cudaMemcpyHostToDevice );
  // run kernel
  cudaEventRecord(ker_start,0);
  calculation<<<grid_size,block_size>>>( dev_a,
                                         dev_b,
                                         dev_c,
                                         num_matrices,
                                         matrix_size );
  cudaEventRecord(ker_stop,0);
  // copy the array 'c' back from the GPU to the CPU
  cudaMemcpy( c_gpu, dev_c, num_matrices * matrix_size * matrix_size * sizeof(char), cudaMemcpyDeviceToHost );
  cudaEventRecord(gpu_stop,0);
  cudaEventSynchronize(gpu_stop);
  cudaEventElapsedTime(&time , gpu_start, gpu_stop);
  cudaEventElapsedTime(&ker_time, ker_start, ker_stop);
  printf("\tParallel Job Time: %.2f ms\n", time);
  printf("\tKernel Exec. Time: %.2f ms\n", ker_time);

  //
  // Compare Results
  //////////////////
  int error = 0;
  for (int i = 0; i < num_matrices * matrix_size * matrix_size; i++) {
    if (c_cpu[i] != c_gpu[i]){
      error = 1;
      printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
    }
    if (error) break;
  }
  if (error == 0){
    printf ("Correct result. No errors were found.\n");
  }

  //
  // Free resources
  //////////////////
  // free the memory allocated on the GPU
  cudaFree( dev_a );
  cudaFree( dev_b );
  cudaFree( dev_c );
  // free cuda events (including the setup events, previously leaked)
  cudaEventDestroy (stp_start);
  cudaEventDestroy (stp_stop);
  cudaEventDestroy (cpu_start);
  cudaEventDestroy (gpu_start);
  cudaEventDestroy (ker_start);
  cudaEventDestroy (cpu_stop);
  cudaEventDestroy (gpu_stop);
  cudaEventDestroy (ker_stop);
  // release host buffers: new[] must be paired with delete[], not free()
  delete[] a;
  delete[] b;
  delete[] c_cpu;
  delete[] c_gpu;
  return 0;
}
|
37b2de0d4d3ad6021280568f2693bb563f4ae822.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "hip/device_functions.h"
#include <thrust/scan.h>
#include <thrust/sort.h>
#include "CycleTimer.h"
#define SCAN_BLOCK_DIM 512
#define uint unsigned int
#include "exclusiveScan.cu_inl"
#include "cuPrintf.hip"
#include "scan.hip"
#include "book.h"
using namespace std;
extern float toBW(int bytes, float sec);
// CPU reference join: brute-force nested loop matching tuples on the .x key.
// Both relations are expected to be sorted ascending by .x — the early break
// below relies on rel_b being sorted. Writes (key, left payload, right
// payload) triples to `out` and the emitted count to *numResult.
void sequential_join(int2* rel_a, int2* rel_b, int rel_a_size, int rel_b_size, int3* out, int* numResult) {
    *numResult = 0;
    double t0 = CycleTimer::currentSeconds();
    for (int ia = 0; ia < rel_a_size; ia++) {
        const int key = rel_a[ia].x;
        for (int ib = 0; ib < rel_b_size; ib++) {
            if (key == rel_b[ib].x) {
                // Match: emit the joined triple.
                int3 joined;
                joined.x = key;
                joined.y = rel_a[ia].y;
                joined.z = rel_b[ib].y;
                out[(*numResult)++] = joined;
            } else if (key < rel_b[ib].x) {
                // rel_b is sorted: no later tuple can match this key.
                break;
            }
        }
    }
    double t1 = CycleTimer::currentSeconds();
    printf("time excution from sequential join %.3f ms\n",1000.f * (t1 - t0));
    printf("sequential join produces %d tuples\n", *numResult);
}
// Compare the GPU join output against the sequential reference: counts must
// match and every (x, y, z) triple must match in order. Prints the first
// mismatch and returns, or prints "JOIN PASS !" on success.
void validate_join(int3* seq_out, int seq_num, int3* cuda_out, int cuda_num) {
    if (seq_num != cuda_num) {
        printf("num of tuples seq(%d) != cuda(%d)", seq_num, cuda_num);
        return;
    }
    for (int i = 0; i < cuda_num; i++) {
        bool same = seq_out[i].x == cuda_out[i].x
                 && seq_out[i].y == cuda_out[i].y
                 && seq_out[i].z == cuda_out[i].z;
        if (!same) {
            printf("At line %d, not match FAIL\n", i);
            printf("seq: [%d, %d, %d], GPU: [%d, %d, %d]\n",
                   seq_out[i].x, seq_out[i].y, seq_out[i].z,
                   cuda_out[i].x, cuda_out[i].y, cuda_out[i].z);
            return;
        }
    }
    printf("JOIN PASS !\n");
}
/*
    SELECT kernel: one 512-thread block per 512-tuple partition of `tuples`.
    Flags the tuples whose value is even, compacts this partition's
    survivors into result[partition .. partition+count) using an exclusive
    scan of the flags, and writes the per-block survivor count to
    result_size[blockIdx.x] via a shared-memory tree reduction of the flags.
    Requires blockDim.x == SCAN_BLOCK_DIM (512).
*/
__global__ void
primitive_select_kernel(int N, int* tuples, int* result, int* result_size) {
    __shared__ uint input[SCAN_BLOCK_DIM];   // per-thread selection flag (0/1)
    __shared__ uint output[SCAN_BLOCK_DIM];  // exclusive prefix sum of flags
    __shared__ uint scratch[2 * SCAN_BLOCK_DIM];
    int threadIndex = threadIdx.x;
    int partition = blockIdx.x * blockDim.x;
    //cuPrintf("%d\n", threadIndex);
    input[threadIndex] = 0;
    output[threadIndex] = 0;
    // Tail block: threads past N keep a zero flag.
    if ( partition + threadIndex < N ) {
        input[threadIndex] = tuples[partition + threadIndex] % 2 == 0? 1 : 0;
    }
    __syncthreads();
    sharedMemExclusiveScan(threadIndex, input, output, scratch, SCAN_BLOCK_DIM);
    // Scatter each selected tuple to its scan-computed slot inside this
    // block's partition of `result` (compacted later by `coalesced`).
    if(input[threadIndex]){
        //atomicAdd(result_size + blockIdx.x, 1);
        result[partition + output[threadIndex]] = tuples[partition + threadIndex];
    }
    // Tree-reduce the flags to obtain the block's survivor count.
    for(int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
        if(threadIdx.x < offset) {
            // add a partial sum upstream to our own
            input[threadIdx.x] += input[threadIdx.x + offset];
        }
        // wait until all threads in the block have
        // updated their partial sums
        __syncthreads();
    }
    // thread 0 writes the final result
    if(threadIdx.x == 0) {
        result_size[blockIdx.x] = input[0];
    }
}
/*
    Gather stage of SELECT: compact every block's surviving tuples into a
    dense output. histogram[b] is the exclusive prefix sum of the per-block
    survivor counts, i.e. block b's starting offset in `out`.
*/
__global__ void coalesced(int N, int* result, int* result_size, int* histogram, int* out) {
    const int tid  = threadIdx.x;
    const int base = blockIdx.x * blockDim.x;
    // Only the first result_size[b] threads of block b have a tuple to move.
    if (tid < result_size[blockIdx.x]) {
        out[histogram[blockIdx.x] + tid] = result[base + tid];
    }
    __syncthreads();
}
/*
    Sample of how to use scanLargeArray (prescanArray) from the Nvidia SDK:
    scans a fixed-size array of ones and prints the result.
    The N/inData/outData parameters are currently unused; they are kept so
    the interface matches the other primitives.
*/
void primitive_scan(int N, int inData[], int outData[]) {
    int large_num = 39063;
    // Heap-allocate the host staging buffer: the original used a ~153 KB
    // variable-length stack array, which is non-standard C++ and risks
    // stack overflow.
    float* tmp = new float[large_num];
    float* large_in;
    float* large_out;
    double startTime;
    double endTime;
    hipMalloc((void**) &large_in, sizeof(float) * large_num);
    hipMalloc((void**) &large_out, sizeof(float) * large_num);
    for(int i = 0; i < large_num; i ++) {
        tmp[i] = 1.0;
    }
    hipMemcpy(large_in, tmp, sizeof(float) * large_num, hipMemcpyHostToDevice);
    startTime = CycleTimer::currentSeconds();
    preallocBlockSums(large_num);
    prescanArray(large_out, large_in, large_num);
    endTime = CycleTimer::currentSeconds();
    printf("time excution from large array scan %.3f ms\n", 1000.f * (endTime - startTime));
    hipMemcpy(tmp, large_out, sizeof(float) * large_num, hipMemcpyDeviceToHost);
    for(int i = 0; i < large_num; i ++) {
        printf("%f ", tmp[i]);
    }
    printf("\n");
    int y[] = {1, 2};
    printf("%d\n", y[(int)tmp[1]]);
    deallocBlockSums();
    // Fix: release the device buffers and the staging buffer (the original
    // leaked large_in/large_out).
    hipFree(large_in);
    hipFree(large_out);
    delete[] tmp;
}
/*
    Implementation of the SELECT operation: copies the even elements of
    inData, compacted, into the front of outData using the GPU.
    Pipeline (one 512-thread block per 512-element partition):
      1. primitive_select_kernel — per-block flag/scan/compact plus
         per-block survivor counts (result_size)
      2. thrust::exclusive_scan — prefix-sums the counts into `histogram`
      3. coalesced — gathers every block's survivors to their final offsets
    Prints timing / bandwidth figures for the whole round trip and for the
    device-side portion alone.
*/
void
primitive_select(int N, int inData[], int outData[]) {
    const int threadPerBlock = 512;
    const int blocks = (N + threadPerBlock - 1) / threadPerBlock;
    const int blocksOfResultSize = ( blocks + threadPerBlock - 1) / threadPerBlock;
    int totalBytes = N * sizeof(int) * 2;
    printf("Num of tuples %d\n", N);
    printf("Num of blocks %d\n", blocks);
    printf("Num of blocks for result size %d\n", blocksOfResultSize);
    int* device_in;
    int* device_result;
    int* result_size;
    int* histogram;
    int* out;
    // Zero-filled host block used to clear the device buffers.
    int* tmp = (int*)calloc(N, sizeof(int));
    hipMalloc((void**) &device_in, sizeof(int) * N);
    hipMalloc((void**) &device_result, sizeof(int) * N);
    hipMalloc((void**) &out, sizeof(int) * N);
    hipMalloc((void**) &result_size, sizeof(int) * blocks);
    hipMalloc((void**) &histogram, sizeof(int) * blocks);
    double startTime = CycleTimer::currentSeconds();
    hipMemcpy(device_in, inData, sizeof(int) * N, hipMemcpyHostToDevice);
    hipMemcpy(device_result, tmp, sizeof(int) * N, hipMemcpyHostToDevice);
    hipMemcpy(out, tmp, sizeof(int) * N, hipMemcpyHostToDevice);
    hipMemcpy(result_size, tmp, sizeof(int) * blocks, hipMemcpyHostToDevice);
    cudaPrintfInit();
    double startTime_inner = CycleTimer::currentSeconds();
    hipLaunchKernelGGL(( primitive_select_kernel), dim3(blocks), dim3(threadPerBlock), 0, 0, N, device_in, device_result, result_size);
    hipDeviceSynchronize();
    // Prefix-sum the per-block counts to get every block's output offset.
    thrust::device_ptr<int> dev_ptr1(result_size);
    thrust::device_ptr<int> dev_ptr2(histogram);
    thrust::exclusive_scan(dev_ptr1, dev_ptr1 + blocks, dev_ptr2);
    hipLaunchKernelGGL(( coalesced), dim3(blocks), dim3(threadPerBlock), 0, 0, N, device_result, result_size, histogram, out);
    double endTime_inner = CycleTimer::currentSeconds();
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
    hipMemcpy(outData, out, sizeof(int) * N, hipMemcpyDeviceToHost);
    double endTime = CycleTimer::currentSeconds();
    double overallDuration = endTime - startTime;
    double kernelDuration = endTime_inner - startTime_inner;
    printf("CUDA overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration));
    printf("CUDA execution time for kernel: %.3f ms\t\t[%.3f GB/s]\n", 1000.f*kernelDuration, toBW(totalBytes, kernelDuration));
    hipFree(device_in);
    hipFree(device_result);
    hipFree(out);
    hipFree(result_size);
    hipFree(histogram);
    // Fix: the zero block was leaked in the original.
    free(tmp);
}
__device__ int get_index_to_check(int thread, int num_threads, int set_size, int offset) {
    // Map a thread id to the array index it probes in this round of the
    // p-ary search: each thread owns a stripe of width
    // (set_size + num_threads) / num_threads, shifted by the current offset.
    const int stripe = (set_size + num_threads) / num_threads;
    return stripe * thread + offset;
}
/*
    Cooperative p-ary search, lower-bound flavor. Must be called by every
    thread of the block with identical arguments (it contains
    __syncthreads()). Each round the threads probe evenly spaced stripes of
    the current window of `arr` (assumed sorted ascending by .x, as produced
    by thrust::sort in primitive_join) and shrink the window by roughly a
    factor of blockDim.x.
    ret_val[0]: smallest index with arr[i].x == search, folded in with
                atomicMin — the caller pre-initializes it to a sentinel
                (pnary_partition uses M).
    ret_val[1]: shared window offset, rewritten by the thread whose stripe
                brackets the key.
*/
__device__ void search_lower(int search, int array_length, int2 *arr, int *ret_val ) {
    const int num_threads = blockDim.x;
    const int thread = threadIdx.x;
    int set_size = array_length;
    while(set_size != 0){
        // Get the offset of the array, initially set to 0
        int offset = ret_val[1];
        // In case a thread gets ahead and rewrites the offset before
        // everyone has read it.
        __syncthreads();
        // Get the next index to check
        int index_to_check = get_index_to_check(thread, num_threads, set_size, offset);
        // If the index is outside the bounds of the array then lets not check it
        if (index_to_check < array_length){
            // Clamp the neighboring probe to the last element.
            int next_index_to_check = get_index_to_check(thread + 1, num_threads, set_size, offset);
            if (next_index_to_check >= array_length){
                next_index_to_check = array_length - 1;
            }
            // Key falls inside this thread's stripe: publish a narrower window.
            if (search > arr[index_to_check].x && (search <= arr[next_index_to_check].x)) {
                ret_val[1] = index_to_check;
            }
            else if (search == arr[index_to_check].x) {
                // Hit: keep the smallest matching index seen so far.
                atomicMin(&ret_val[0], index_to_check);
            }
        }
        // Since this is a p-ary search divide by our total threads to get the next set size
        set_size = set_size / num_threads;
        // Sync up so no threads jump ahead and get a bad offset
        __syncthreads();
    }
}
/*
    Cooperative p-ary search, upper-bound flavor (mirror of search_lower).
    Must be called by every thread of the block with identical arguments
    (it contains __syncthreads()).
    ret_val[0]: largest index with arr[i].x == search, folded in with
                atomicMax — the caller pre-initializes it to a sentinel
                (pnary_partition uses -1).
    ret_val[1]: shared window offset, rewritten by the thread whose stripe
                brackets the key.
*/
__device__ void search_upper(int search, int array_length, int2 *arr, int *ret_val ) {
    const int num_threads = blockDim.x;
    const int thread = threadIdx.x;
    int set_size = array_length;
    while(set_size != 0){
        // Get the offset of the array, initially set to 0
        int offset = ret_val[1];
        // In case a thread gets ahead and rewrites the offset before
        // everyone has read it.
        __syncthreads();
        // Get the next index to check
        int index_to_check = get_index_to_check(thread, num_threads, set_size, offset);
        // If the index is outside the bounds of the array then lets not check it
        if (index_to_check < array_length){
            // Clamp the neighboring probe to the last element.
            int next_index_to_check = get_index_to_check(thread + 1, num_threads, set_size, offset);
            if (next_index_to_check >= array_length){
                next_index_to_check = array_length - 1;
            }
            // Key falls inside this thread's stripe: publish a narrower window.
            if (search > arr[index_to_check].x && (search <= arr[next_index_to_check].x)) {
                ret_val[1] = index_to_check;
            }
            else if (search == arr[index_to_check].x) {
                // Hit: keep the largest matching index seen so far.
                atomicMax(&ret_val[0], index_to_check);
            }
        }
        // Since this is a p-ary search divide by our total threads to get the next set size
        set_size = set_size / num_threads;
        // Sync up so no threads jump ahead and get a bad offset
        __syncthreads();
    }
}
/*
    Stand-alone p-ary search kernel: every launched thread cooperates on a
    single lookup of `search` in `arr` (assumed sorted ascending by .x).
    ret_val[0] receives a matching index (plain store — if several threads
    hit the key in the same round, which one wins is unspecified);
    ret_val[1] is the shared window offset. The caller is expected to
    initialize both (see the commented-out initializers below).
    NOTE(review): __syncthreads() only synchronizes one block, yet the
    thread count / window state here spans the whole grid — this is only a
    safe barrier for single-block launches; confirm the intended launch
    configuration.
*/
__global__ void p_ary_search(int search, int array_length, int2 *arr, int *ret_val ) {
    const int num_threads = blockDim.x * gridDim.x;
    const int thread = blockIdx.x * blockDim.x + threadIdx.x;
    //ret_val[0] = -1;
    //ret_val[1] = 0;
    int set_size = array_length;
    while(set_size != 0){
        // Get the offset of the array, initially set to 0
        int offset = ret_val[1];
        // In case a thread gets ahead and rewrites the offset before
        // everyone has read it.
        __syncthreads();
        // Get the next index to check
        int index_to_check = get_index_to_check(thread, num_threads, set_size, offset);
        // If the index is outside the bounds of the array then lets not check it
        if (index_to_check < array_length){
            // Clamp the neighboring probe to the last element.
            int next_index_to_check = get_index_to_check(thread + 1, num_threads, set_size, offset);
            if (next_index_to_check >= array_length){
                next_index_to_check = array_length - 1;
            }
            // Key falls strictly inside this thread's stripe: narrow the window.
            if (search > arr[index_to_check].x && (search < arr[next_index_to_check].x)) {
                ret_val[1] = index_to_check;
            }
            else if (search == arr[index_to_check].x) {
                // Direct hit.
                ret_val[0] = index_to_check;
            }
        }
        // Since this is a p-ary search divide by our total threads to get the next set size
        set_size = set_size / num_threads;
        // Sync up so no threads jump ahead and get a bad offset
        __syncthreads();
    }
}
/*
    Partition kernel: one block per 512-tuple chunk of rel_a. Cooperatively
    finds the window [lower, upper] of rel_b (sorted by .x) that can match
    this chunk's key range, and writes an upper bound on this block's join
    output size, blockDim.x * window_length, to out_bound[blockIdx.x]
    (later exclusive-scanned into per-block output offsets).
    lower_array/upper_array hold {result, offset} pairs per block for the
    two p-ary searches; M and -1 are the "not found" sentinels.
    NOTE(review): upper_bound reads rel_a[(blockIdx.x+1)*blockDim.x - 1],
    which is out of bounds for the last block when N is not a multiple of
    blockDim.x — confirm callers guarantee N % 512 == 0.
*/
__global__ void pnary_partition(int2* rel_a, int2* rel_b, int* lower_array, int* upper_array, float* out_bound, int N, int M) {
    // Key range covered by this block's chunk of rel_a.
    const int lower_bound = rel_a[blockIdx.x * blockDim.x].x;
    const int upper_bound = rel_a[(blockIdx.x + 1) * blockDim.x - 1].x;
    __shared__ int lower;
    __shared__ int upper;
    // All threads store the same sentinel values (same-value race, benign).
    lower_array[2 * blockIdx.x] = M;
    lower_array[2 * blockIdx.x + 1] = 0;
    upper_array[2 * blockIdx.x] = -1;
    upper_array[2 * blockIdx.x + 1] = 0;
    __syncthreads();
    search_lower(lower_bound, M, rel_b, &lower_array[2 * blockIdx.x]);
    search_upper(upper_bound, M, rel_b, &upper_array[2 * blockIdx.x]);
    // Fall back to the search offset when no exact key match was found.
    lower = lower_array[2 * blockIdx.x] < M? lower_array[2 * blockIdx.x]:lower_array[2 * blockIdx.x + 1];
    upper = upper_array[2 * blockIdx.x] >= 0? upper_array[2 * blockIdx.x]:upper_array[2 * blockIdx.x + 1];
    if( upper < lower) {
        // Degenerate window: widen to the end of rel_b.
        upper = M - 1;
    }
    out_bound[blockIdx.x] = blockDim.x * ( upper - lower + 1);
}
/*
    Per-block nested-loop join (one 512-thread block per 512-tuple chunk of
    rel_a). Recomputes the rel_b window [lower, upper] from the
    lower_array/upper_array results of pnary_partition (same sentinel
    fallback logic), stages both sides in shared memory, counts matches per
    thread, exclusive-scans the counts, then scatters the joined triples
    into `out` at the block's padded base offset out_bound[blockIdx.x]
    (a float, truncated to int). Writes the block's true match count to
    result_size[blockIdx.x].
    num: capacity of `out`; writes past it are silently dropped.
    Requires blockDim.x == 512 (hard-coded tile sizes and 511 indices).
    NOTE(review): `right` holds 1024 entries but the load loop runs to
    num_right — a window wider than 1024 would overflow the shared tile;
    likewise `left` reads rel_a past N for a partial last chunk. Confirm
    the callers' size guarantees.
*/
void
__global__ brute_join( int3* out, int2* rel_a, int2* rel_b, int num, int N, int M, float* out_bound, float* result_size, int* lower_array, int* upper_array ) {
    __shared__ int2 left[512];    // this block's chunk of rel_a
    __shared__ int2 right[1024];  // the rel_b window for this block
    __shared__ uint count[512];   // per-thread match counts
    __shared__ uint index[512];   // exclusive prefix sum of count
    __shared__ uint scratch[1024];
    int lower;
    int upper;
    int num_right;
    // Reconstruct the window, falling back to the search offsets when the
    // p-ary searches found no exact key match (mirrors pnary_partition).
    lower = lower_array[2 * blockIdx.x] < M? lower_array[2 * blockIdx.x]:lower_array[2 * blockIdx.x + 1];
    upper = upper_array[2 * blockIdx.x] >= 0? upper_array[2 * blockIdx.x]:upper_array[2 * blockIdx.x + 1];
    if( upper < lower) {
        upper = M - 1;
    }
    num_right = upper - lower + 1;
    int threadIndex = threadIdx.x;
    int partition = blockIdx.x * blockDim.x;
    // counter for each thread
    count[threadIndex] = 0;
    index[threadIndex] = 0;
    // load two relation to the cache, make future access faster
    left[threadIndex] = rel_a[partition + threadIndex];
    for(int i = 0 ; i < num_right; i+= 512) {
        if(i + threadIndex < num_right) {
            right[i + threadIndex] = rel_b[lower + i + threadIndex];
        }
        __syncthreads();
    }
    // Pass 1: count this thread's matches within the window.
    for(int i = 0 ; i < num_right; i++ ) {
        if(left[threadIndex].x == right[i].x) {
            count[threadIndex] ++;
        }
    }
    __syncthreads();
    sharedMemExclusiveScan(threadIndex, count, index, scratch, SCAN_BLOCK_DIM);
    // Pass 2: scatter matches to base + scanned offset + running position.
    int current = 0;
    for(int i = 0 ; i < num_right; i++ ) {
        if(left[threadIndex].x == right[i].x) {
            int j = (int)out_bound[blockIdx.x] + index[threadIndex] + current;
            if( j < num) {
                out[j].x = left[threadIndex].x;
                out[j].y = left[threadIndex].y;
                out[j].z = right[i].y;
                current++;
            }
        }
    }
    // Block total = last thread's scanned offset plus its own count.
    if(threadIdx.x == 0) {
        result_size[blockIdx.x] = count[511] + index[511];
    }
}
/*
    Compaction pass: copies block b's result_size[b] joined tuples from the
    padded region of `out` (base out_bound[b]) into the dense `result`
    array (base histogram[b], the exclusive prefix sum of result_size).
    Offsets are carried as floats and truncated to int on use.
    The __syncthreads() inside the loop is uniform: `size` is per-block, so
    every thread of the block runs the same number of iterations.
*/
void
__global__ join_coalesced(int3* result, int3* out, float* result_size, float* histogram, float* out_bound) {
    int size = result_size[blockIdx.x];
    int result_index;
    int out_index;
    // Strided copy, 512 elements per pass (blockDim.x is expected to be 512).
    for(int i = 0; i < size; i += 512) {
        if(i + threadIdx.x < size) {
            out_index = out_bound[blockIdx.x] + threadIdx.x + i;
            result_index = histogram[blockIdx.x] + threadIdx.x + i;
            result[result_index].x = out[out_index].x;
            result[result_index].y = out[out_index].y;
            result[result_index].z = out[out_index].z;
        }
        __syncthreads();
    }
}
/*
Implementation of JOIN operationi
rel_a: left relation
rel_b: right relation
N: size of rel_a
M: size of rel_b
*/
// Ordering for sorting int2 tuples by their join key (.x).
// Fix: use strict '<'. thrust::sort requires a strict weak ordering, and the
// original '<=' violates irreflexivity (comp(a, a) must be false), which is
// undefined behavior for the sort. Equal-key tuples still end up adjacent.
struct compare_int2 {
    __host__ __device__
    bool operator()(int2 a, int2 b) {
        return a.x < b.x;
    }
};
void primitive_join(int N, int M) {
    // Sort-based GPU join of two randomly generated relations on the .x key,
    // validated against the sequential CPU join.
    // N: size of the left relation (rel_a); M: size of the right (rel_b).
    // GPU pipeline (one 512-thread block per 512-tuple chunk of rel_a):
    //   1. pnary_partition — per-block rel_b windows + output upper bounds
    //   2. exclusive_scan(out_bound) — padded per-block output offsets
    //   3. brute_join — per-block nested-loop join into the padded buffer
    //   4. exclusive_scan(result_size) -> histogram — dense offsets
    //   5. join_coalesced — compact into `result`
    int min = 1;
    int max = 1024;
    int2* rel_a = new int2[N];
    int2* rel_b = new int2[M];
    // 4*N output slots: assumes average selectivity below 4 results per
    // left tuple.
    int3* result_seq = new int3[4 * N];
    int seq_num;
    for(int i = 0; i < N; i ++) {
        rel_a[i] = make_int2(min + (rand() % (int)(max - min + 1)), min + (rand() % (int)(max - min + 1)) );
    }
    for(int i = 0; i < M; i ++) {
        rel_b[i] = make_int2(min + (rand() % (int)(max - min + 1)), min + (rand() % (int)(max - min + 1)) );
    }
    // Both sides must be sorted by key for the p-ary searches and the
    // sequential join's early termination.
    thrust::sort(rel_a, rel_a + N, compare_int2());
    thrust::sort(rel_b, rel_b + M, compare_int2());
    sequential_join(rel_a, rel_b, N, M, result_seq, &seq_num);
    // prepare device buffers
    const int threadPerBlock = 512;
    const int blocks = (N + threadPerBlock - 1) / threadPerBlock;
    printf("num blocks: %d\n", blocks);
    int2* dev_rel_a;
    int2* dev_rel_b;
    int* lower_array;
    int* upper_array;
    float* out_bound;
    float* result_size;
    float* histogram;
    int3* out;
    int3* result;
    hipMalloc((void**) &out, sizeof(int3) * N * 4);
    hipMalloc((void**) &result, sizeof(int3) * N * 4);
    hipMalloc((void**) &result_size, sizeof(float) * blocks);
    hipMalloc((void**) &histogram, sizeof(float) * blocks);
    hipMalloc((void**) &out_bound, sizeof(float) * blocks);
    hipMalloc((void**) &lower_array, sizeof(int) * blocks * 2);
    hipMalloc((void**) &upper_array, sizeof(int) * blocks * 2);
    hipMalloc((void**) &dev_rel_a, sizeof(int2) * N);
    hipMalloc((void**) &dev_rel_b, sizeof(int2) * M);
    double startTime = CycleTimer::currentSeconds();
    hipMemcpy(dev_rel_a, rel_a, sizeof(int2) * N, hipMemcpyHostToDevice);
    hipMemcpy(dev_rel_b, rel_b, sizeof(int2) * M, hipMemcpyHostToDevice);
    cudaPrintfInit();
    double startTime_inner = CycleTimer::currentSeconds();
    hipLaunchKernelGGL(( pnary_partition), dim3(blocks), dim3(threadPerBlock) , 0, 0, dev_rel_a, dev_rel_b, lower_array, upper_array ,out_bound, N, M);
    // In-place exclusive scan turns per-block bounds into base offsets.
    thrust::device_ptr<float> dev_ptr1(out_bound);
    thrust::exclusive_scan(dev_ptr1, dev_ptr1 + blocks, dev_ptr1);
    hipLaunchKernelGGL(( brute_join), dim3(blocks), dim3(threadPerBlock) , 0, 0, out, dev_rel_a, dev_rel_b, N * 4 , N, M, out_bound, result_size, lower_array, upper_array);
    thrust::device_ptr<float> dev_ptr2(result_size);
    thrust::device_ptr<float> dev_ptr3(histogram);
    thrust::exclusive_scan(dev_ptr2, dev_ptr2 + blocks, dev_ptr3);
    hipLaunchKernelGGL(( join_coalesced), dim3(blocks), dim3(threadPerBlock), 0, 0, result, out, result_size, histogram, out_bound);
    double endTime_inner = CycleTimer::currentSeconds();
    printf("time excution from cuda join kernel %.3f ms\n",1000.f * (endTime_inner - startTime_inner));
    // Total results = last block's dense offset + last block's count.
    float* p = new float[blocks];
    int numResult = 0;
    hipMemcpy(p, result_size, sizeof(float) * blocks, hipMemcpyDeviceToHost);
    numResult += p[blocks-1];
    hipMemcpy(p, histogram, sizeof(float) * blocks, hipMemcpyDeviceToHost);
    numResult += p[blocks-1];
    printf("cuda produces %d tuples\n", numResult);
    int3* tmp_check = new int3[numResult];
    hipMemcpy(tmp_check, result, sizeof(int3)*numResult, hipMemcpyDeviceToHost);
    double endTime = CycleTimer::currentSeconds();
    printf("time excution from cuda join overall %.3f ms\n",1000.f * (endTime - startTime));
    validate_join(result_seq, seq_num, tmp_check, numResult);
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
    hipFree(dev_rel_a);
    hipFree(dev_rel_b);
    hipFree(lower_array);
    hipFree(upper_array);
    hipFree(out_bound);
    hipFree(result_size);
    hipFree(out);
    hipFree(histogram);
    hipFree(result);
    // Fix: release the host buffers (all leaked in the original).
    delete[] rel_a;
    delete[] rel_b;
    delete[] result_seq;
    delete[] p;
    delete[] tmp_check;
}
#define N (1024*1024)
#define FULL_DATA_SIZE (N*20)
// Demo kernel (from the streams example): for each guarded index, averages
// the element with its two (mod-256) neighbors in a and b, then stores the
// mean of the two averages (truncated to int) in c.
__global__ void kernel( int *a, int *b, int *c ) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N) return;   // tail guard: only the first N elements are computed
    int n1 = (idx + 1) % 256;
    int n2 = (idx + 2) % 256;
    float avgA = (a[idx] + a[n1] + a[n2]) / 3.0f;
    float avgB = (b[idx] + b[n1] + b[n2]) / 3.0f;
    c[idx] = (avgA + avgB) / 2;
}
/*
    Double-buffered streaming demo (CUDA by Example): two streams alternate
    async H2D copies, kernel launches and D2H copies over FULL_DATA_SIZE
    elements, N per chunk per stream. Page-locked host memory is required
    for the async copies to overlap with compute. Prints the elapsed wall
    time of the whole pipeline.
*/
void streamTest() {
    hipDeviceProp_t prop;
    int whichDevice;
    HANDLE_ERROR( hipGetDevice( &whichDevice ) );
    HANDLE_ERROR( hipGetDeviceProperties( &prop, whichDevice ) );
    if (!prop.deviceOverlap) {
        printf( "Device will not handle overlaps, so no speed up from streams\n" );
    }
    hipEvent_t start, stop;
    float elapsedTime;
    hipStream_t stream0, stream1;
    int *host_a, *host_b, *host_c;
    int *dev_a0, *dev_b0, *dev_c0;   // stream0's working set
    int *dev_a1, *dev_b1, *dev_c1;   // stream1's working set
    // start the timers
    HANDLE_ERROR( hipEventCreate( &start ) );
    HANDLE_ERROR( hipEventCreate( &stop ) );
    // initialize the streams
    HANDLE_ERROR( hipStreamCreate( &stream0 ) );
    HANDLE_ERROR( hipStreamCreate( &stream1 ) );
    // allocate the memory on the GPU (one N-sized chunk per buffer)
    HANDLE_ERROR( hipMalloc( (void**)&dev_a0, N * sizeof(int) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_b0, N * sizeof(int) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_c0, N * sizeof(int) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_a1, N * sizeof(int) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_b1, N * sizeof(int) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_c1, N * sizeof(int) ) );
    // allocate page-locked host memory, required for async copy overlap
    HANDLE_ERROR( hipHostMalloc( (void**)&host_a,
                                 FULL_DATA_SIZE * sizeof(int),
                                 hipHostMallocDefault ) );
    HANDLE_ERROR( hipHostMalloc( (void**)&host_b,
                                 FULL_DATA_SIZE * sizeof(int),
                                 hipHostMallocDefault ) );
    HANDLE_ERROR( hipHostMalloc( (void**)&host_c,
                                 FULL_DATA_SIZE * sizeof(int),
                                 hipHostMallocDefault ) );
    for (int i=0; i<FULL_DATA_SIZE; i++) {
        host_a[i] = rand();
        host_b[i] = rand();
    }
    HANDLE_ERROR( hipEventRecord( start, 0 ) );
    // now loop over full data, in bite-sized chunks (N per stream per pass)
    for (int i=0; i<FULL_DATA_SIZE; i+= N*2) {
        // enqueue copies of a in stream0 and stream1
        HANDLE_ERROR( hipMemcpyAsync( dev_a0, host_a+i,
                                      N * sizeof(int),
                                      hipMemcpyHostToDevice,
                                      stream0 ) );
        HANDLE_ERROR( hipMemcpyAsync( dev_a1, host_a+i+N,
                                      N * sizeof(int),
                                      hipMemcpyHostToDevice,
                                      stream1 ) );
        // enqueue copies of b in stream0 and stream1
        HANDLE_ERROR( hipMemcpyAsync( dev_b0, host_b+i,
                                      N * sizeof(int),
                                      hipMemcpyHostToDevice,
                                      stream0 ) );
        HANDLE_ERROR( hipMemcpyAsync( dev_b1, host_b+i+N,
                                      N * sizeof(int),
                                      hipMemcpyHostToDevice,
                                      stream1 ) );
        // enqueue kernels in stream0 and stream1
        hipLaunchKernelGGL(( kernel), dim3(N/256),dim3(256),0,stream0, dev_a0, dev_b0, dev_c0 );
        hipLaunchKernelGGL(( kernel), dim3(N/256),dim3(256),0,stream1, dev_a1, dev_b1, dev_c1 );
        // enqueue copies of c from device to locked memory
        HANDLE_ERROR( hipMemcpyAsync( host_c+i, dev_c0,
                                      N * sizeof(int),
                                      hipMemcpyDeviceToHost,
                                      stream0 ) );
        HANDLE_ERROR( hipMemcpyAsync( host_c+i+N, dev_c1,
                                      N * sizeof(int),
                                      hipMemcpyDeviceToHost,
                                      stream1 ) );
    }
    // drain both streams before stopping the timer
    HANDLE_ERROR( hipStreamSynchronize( stream0 ) );
    HANDLE_ERROR( hipStreamSynchronize( stream1 ) );
    HANDLE_ERROR( hipEventRecord( stop, 0 ) );
    HANDLE_ERROR( hipEventSynchronize( stop ) );
    HANDLE_ERROR( hipEventElapsedTime( &elapsedTime,
                                       start, stop ) );
    printf( "Time taken: %3.1f ms\n", elapsedTime );
    // cleanup the streams and memory
    HANDLE_ERROR( hipHostFree( host_a ) );
    HANDLE_ERROR( hipHostFree( host_b ) );
    HANDLE_ERROR( hipHostFree( host_c ) );
    HANDLE_ERROR( hipFree( dev_a0 ) );
    HANDLE_ERROR( hipFree( dev_b0 ) );
    HANDLE_ERROR( hipFree( dev_c0 ) );
    HANDLE_ERROR( hipFree( dev_a1 ) );
    HANDLE_ERROR( hipFree( dev_b1 ) );
    HANDLE_ERROR( hipFree( dev_c1 ) );
    HANDLE_ERROR( hipStreamDestroy( stream0 ) );
    HANDLE_ERROR( hipStreamDestroy( stream1 ) );
    // Fix: destroy the timing events (leaked in the original).
    HANDLE_ERROR( hipEventDestroy( start ) );
    HANDLE_ERROR( hipEventDestroy( stop ) );
}
/*
    Baseline for streamTest: one synchronous H2D copy of the full data, one
    kernel launch, one D2H copy; prints the elapsed time.
    NOTE(review): the kernel guards with `idx < N`, so only the first N of
    the FULL_DATA_SIZE elements are actually computed here — the timing is
    still dominated by the full-size copies. Confirm this matches intent.
*/
void sequentialTest() {
    hipEvent_t start, stop;
    float elapsedTime;
    HANDLE_ERROR( hipEventCreate( &start ) );
    HANDLE_ERROR( hipEventCreate( &stop ) );
    int* host_a = new int[FULL_DATA_SIZE];
    int* host_b = new int[FULL_DATA_SIZE];
    int* host_c = new int[FULL_DATA_SIZE];
    int *dev_a, *dev_b, *dev_c;
    // allocate the memory on the GPU
    hipMalloc( (void**)&dev_a, FULL_DATA_SIZE * sizeof(int));
    hipMalloc( (void**)&dev_b, FULL_DATA_SIZE * sizeof(int));
    hipMalloc( (void**)&dev_c, FULL_DATA_SIZE * sizeof(int));
    for (int i=0; i<FULL_DATA_SIZE; i++) {
        host_a[i] = rand();
        host_b[i] = rand();
    }
    HANDLE_ERROR( hipEventRecord( start, 0 ) );
    hipMemcpy(dev_a, host_a, sizeof(int) * FULL_DATA_SIZE, hipMemcpyHostToDevice);
    hipMemcpy(dev_b, host_b, sizeof(int) * FULL_DATA_SIZE, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( kernel), dim3(FULL_DATA_SIZE/256),dim3(256),0, 0, dev_a, dev_b, dev_c);
    // Async copy on pageable memory behaves synchronously here.
    hipMemcpyAsync( host_c, dev_c, FULL_DATA_SIZE * sizeof(int), hipMemcpyDeviceToHost);
    HANDLE_ERROR( hipEventRecord( stop, 0 ) );
    HANDLE_ERROR( hipEventSynchronize( stop ) );
    HANDLE_ERROR( hipEventElapsedTime( &elapsedTime,
                                       start, stop ) );
    printf( "Time taken: %3.1f ms\n", elapsedTime );
    // Fix: release device memory, host buffers and events (all leaked in
    // the original).
    hipFree( dev_a );
    hipFree( dev_b );
    hipFree( dev_c );
    delete[] host_a;
    delete[] host_b;
    delete[] host_c;
    HANDLE_ERROR( hipEventDestroy( start ) );
    HANDLE_ERROR( hipEventDestroy( stop ) );
}
void
printCudaInfo() {
    // Print a short capability summary for every CUDA device the runtime
    // can see.
    int deviceCount = 0;
    hipGetDeviceCount(&deviceCount);
    printf("Found %d CUDA devices\n", deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        hipDeviceProp_t props;
        hipGetDeviceProperties(&props, dev);
        printf("Device %d: %s\n", dev, props.name);
        printf(" SMs: %d\n", props.multiProcessorCount);
        printf(" Global mem: %.0f MB\n",
               static_cast<float>(props.totalGlobalMem) / (1024 * 1024));
        printf(" CUDA Cap: %d.%d\n", props.major, props.minor);
    }
}
| 37b2de0d4d3ad6021280568f2693bb563f4ae822.cu | #include <stdio.h>
#include <cuda.h>
#include <iostream>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "device_functions.h"
#include <thrust/scan.h>
#include <thrust/sort.h>
#include "CycleTimer.h"
#define SCAN_BLOCK_DIM 512
#define uint unsigned int
#include "exclusiveScan.cu_inl"
#include "cuPrintf.cu"
#include "scan.cu"
#include "book.h"
using namespace std;
extern float toBW(int bytes, float sec);
// Brute-force CPU reference join: for each tuple of rel_a, scan rel_b for
// matching .x keys and emit (key, left payload, right payload) triples into
// `out`; *numResult receives the number of tuples written. Relies on rel_b
// being sorted ascending by .x for the early `break`.
void sequential_join(int2* rel_a, int2* rel_b, int rel_a_size, int rel_b_size, int3* out, int* numResult) {
    *numResult = 0;
    double startTime = CycleTimer::currentSeconds();
    for (int i = 0; i < rel_a_size; i++) {
        for(int j = 0; j < rel_b_size; j ++ ) {
            if(rel_a[i].x == rel_b[j].x) {
                // Match: emit the joined triple.
                out[*numResult].x = rel_a[i].x;
                out[*numResult].y = rel_a[i].y;
                out[*numResult].z = rel_b[j].y;
                (*numResult)++;
            }
            else if( rel_a[i].x < rel_b[j].x) {
                // rel_b is sorted: no later tuple can match this key.
                break;
            }
        }
    }
    double endTime = CycleTimer::currentSeconds();
    printf("time excution from sequential join %.3f ms\n",1000.f * (endTime - startTime));
    printf("sequential join produces %d tuples\n", *numResult);
}
// Compare the GPU join output against the sequential reference: tuple counts
// must match and every (x, y, z) triple must match in order. Prints the
// first mismatch and returns, or prints "JOIN PASS !" on success.
void validate_join(int3* seq_out, int seq_num, int3* cuda_out, int cuda_num) {
    if( seq_num != cuda_num ) {
        printf("num of tuples seq(%d) != cuda(%d)", seq_num, cuda_num);
        return;
    }
    for(int i = 0 ; i < cuda_num; i ++) {
        if( seq_out[i].x != cuda_out[i].x ||seq_out[i].y != cuda_out[i].y || seq_out[i].z != cuda_out[i].z) {
            printf("At line %d, not match FAIL\n",i);
            printf("seq: [%d, %d, %d], GPU: [%d, %d, %d]\n",seq_out[i].x,seq_out[i].y, seq_out[i].z, cuda_out[i].x, cuda_out[i].y, cuda_out[i].z);
            return;
        }
    }
    printf("JOIN PASS !\n");
}
/*
    SELECT kernel: one 512-thread block per 512-tuple partition of `tuples`.
    Flags the tuples whose value is even, compacts this partition's
    survivors into result[partition .. partition+count) using an exclusive
    scan of the flags, and writes the per-block survivor count to
    result_size[blockIdx.x] via a shared-memory tree reduction of the flags.
    Requires blockDim.x == SCAN_BLOCK_DIM (512).
*/
__global__ void
primitive_select_kernel(int N, int* tuples, int* result, int* result_size) {
    __shared__ uint input[SCAN_BLOCK_DIM];   // per-thread selection flag (0/1)
    __shared__ uint output[SCAN_BLOCK_DIM];  // exclusive prefix sum of flags
    __shared__ uint scratch[2 * SCAN_BLOCK_DIM];
    int threadIndex = threadIdx.x;
    int partition = blockIdx.x * blockDim.x;
    //cuPrintf("%d\n", threadIndex);
    input[threadIndex] = 0;
    output[threadIndex] = 0;
    // Tail block: threads past N keep a zero flag.
    if ( partition + threadIndex < N ) {
        input[threadIndex] = tuples[partition + threadIndex] % 2 == 0? 1 : 0;
    }
    __syncthreads();
    sharedMemExclusiveScan(threadIndex, input, output, scratch, SCAN_BLOCK_DIM);
    // Scatter each selected tuple to its scan-computed slot inside this
    // block's partition of `result` (compacted later by `coalesced`).
    if(input[threadIndex]){
        //atomicAdd(result_size + blockIdx.x, 1);
        result[partition + output[threadIndex]] = tuples[partition + threadIndex];
    }
    // Tree-reduce the flags to obtain the block's survivor count.
    for(int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
        if(threadIdx.x < offset) {
            // add a partial sum upstream to our own
            input[threadIdx.x] += input[threadIdx.x + offset];
        }
        // wait until all threads in the block have
        // updated their partial sums
        __syncthreads();
    }
    // thread 0 writes the final result
    if(threadIdx.x == 0) {
        result_size[blockIdx.x] = input[0];
    }
}
/*
    Gather stage of SELECT: block b copies its result_size[b] surviving
    tuples from its partition of `result` into the dense `out` array at
    offset histogram[b] (the exclusive prefix sum of the per-block counts).
*/
__global__ void coalesced(int N, int* result, int* result_size, int* histogram, int* out) {
    int threadIndex = threadIdx.x;
    int partition = blockIdx.x * blockDim.x;
    // Only the first result_size[b] threads of block b have a tuple to move.
    if( threadIndex < result_size[blockIdx.x] ) {
        out[histogram[blockIdx.x] + threadIndex] = result[partition + threadIndex];
    }
    __syncthreads();
}
/*
    Sample of how to use scanLargeArray (prescanArray) from the Nvidia SDK:
    scans a fixed-size array of ones and prints the result.
    The N/inData/outData parameters are currently unused; they are kept so
    the interface matches the other primitives.
*/
void primitive_scan(int N, int inData[], int outData[]) {
    int large_num = 39063;
    // Heap-allocate the host staging buffer: the original used a ~153 KB
    // variable-length stack array, which is non-standard C++ and risks
    // stack overflow.
    float* tmp = new float[large_num];
    float* large_in;
    float* large_out;
    double startTime;
    double endTime;
    cudaMalloc((void**) &large_in, sizeof(float) * large_num);
    cudaMalloc((void**) &large_out, sizeof(float) * large_num);
    for(int i = 0; i < large_num; i ++) {
        tmp[i] = 1.0;
    }
    cudaMemcpy(large_in, tmp, sizeof(float) * large_num, cudaMemcpyHostToDevice);
    startTime = CycleTimer::currentSeconds();
    preallocBlockSums(large_num);
    prescanArray(large_out, large_in, large_num);
    endTime = CycleTimer::currentSeconds();
    printf("time excution from large array scan %.3f ms\n", 1000.f * (endTime - startTime));
    cudaMemcpy(tmp, large_out, sizeof(float) * large_num, cudaMemcpyDeviceToHost);
    for(int i = 0; i < large_num; i ++) {
        printf("%f ", tmp[i]);
    }
    printf("\n");
    int y[] = {1, 2};
    printf("%d\n", y[(int)tmp[1]]);
    deallocBlockSums();
    // Fix: release the device buffers and the staging buffer (the original
    // leaked large_in/large_out).
    cudaFree(large_in);
    cudaFree(large_out);
    delete[] tmp;
}
/*
    Implementation of SELECT operation
*/
// Pipeline:
//  1. primitive_select_kernel: each 512-thread block filters its partition of
//     the N input tuples, compacting matches into device_result and counting
//     them in result_size[block].
//  2. exclusive scan of result_size -> histogram (per-block output offsets).
//  3. coalesced: gathers every block's matches into the dense `out` array.
// Prints kernel-only and end-to-end timings with effective bandwidth.
void
primitive_select(int N, int inData[], int outData[]) {
    const int threadPerBlock = 512;
    const int blocks = (N + threadPerBlock - 1) / threadPerBlock;
    const int blocksOfReulstSize = ( blocks + threadPerBlock - 1) / threadPerBlock;
    int totalBytes = N * sizeof(int) * 2;
    printf("Num of tuples %d\n", N);
    printf("Num of blocks %d\n", blocks);
    printf("Num of blocks for result size %d\n", blocksOfReulstSize);
    int* device_in;
    int* device_result;
    int* result_size;
    int* histogram;
    int* out;
    // zero-filled staging buffer used only to clear the device arrays
    int* tmp = (int*)calloc(N, sizeof(int));
    cudaMalloc((void**) &device_in, sizeof(int) * N);
    cudaMalloc((void**) &device_result, sizeof(int) * N);
    cudaMalloc((void**) &out, sizeof(int) * N);
    cudaMalloc((void**) &result_size, sizeof(int) * blocks);
    cudaMalloc((void**) &histogram, sizeof(int) * blocks);
    double startTime = CycleTimer::currentSeconds();
    cudaMemcpy(device_in, inData, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(device_result, tmp, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(out, tmp, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(result_size, tmp, sizeof(int) * blocks, cudaMemcpyHostToDevice);
    free(tmp);  // fix: the original leaked this buffer
    cudaPrintfInit();
    double startTime_inner = CycleTimer::currentSeconds();
    primitive_select_kernel<<<blocks, threadPerBlock>>>(N, device_in, device_result, result_size);
    cudaDeviceSynchronize();  // fix: cudaThreadSynchronize is deprecated
    thrust::device_ptr<int> dev_ptr1(result_size);
    thrust::device_ptr<int> dev_ptr2(histogram);
    thrust::exclusive_scan(dev_ptr1, dev_ptr1 + blocks, dev_ptr2);
    coalesced<<<blocks, threadPerBlock>>>(N, device_result, result_size, histogram, out);
    double endTime_inner = CycleTimer::currentSeconds();
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
    cudaMemcpy(outData, out, sizeof(int) * N, cudaMemcpyDeviceToHost);
    double endTime = CycleTimer::currentSeconds();
    double overallDuration = endTime - startTime;
    double kernelDuration = endTime_inner - startTime_inner;
    printf("CUDA overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration));
    printf("CUDA execution time for kernel: %.3f ms\t\t[%.3f GB/s]\n", 1000.f*kernelDuration, toBW(totalBytes, kernelDuration));
    cudaFree(device_in);
    cudaFree(device_result);
    cudaFree(out);
    cudaFree(result_size);
    cudaFree(histogram);
}
// Maps a thread id to the array index it probes in the current p-ary search
// round: each of `num_threads` threads owns one evenly spaced pivot of the
// `set_size`-wide window starting at `offset`.
// NOTE(review): (set_size + num_threads) / num_threads rounds up, but when
// set_size is an exact multiple of num_threads it yields set_size/num_threads
// + 1 (slightly oversized chunks) — presumably deliberate padding; confirm.
__device__ int get_index_to_check(int thread, int num_threads, int set_size, int offset) {
    // Integer division trick to round up
    return (((set_size + num_threads) / num_threads) * thread) + offset;
}
// Block-cooperative p-ary lower-bound search over the sorted int2 array `arr`
// (keys in .x).  All blockDim.x threads probe evenly spaced pivots each round,
// shrinking the active window by a factor of num_threads until it is empty.
// ret_val[1] holds the current window offset; on an exact key match the
// SMALLEST matching index is folded into ret_val[0] via atomicMin.
// Precondition: the caller initializes ret_val[0] to a large sentinel and
// ret_val[1] to 0 (see pnary_partition).
__device__ void search_lower(int search, int array_length, int2 *arr, int *ret_val ) {
    const int num_threads = blockDim.x;
    const int thread = threadIdx.x;
    int set_size = array_length;
    while(set_size != 0){
        // Get the offset of the array, initially set to 0
        int offset = ret_val[1];
        // Barrier so no thread overwrites ret_val[1] before everyone has read it
        __syncthreads();
        // Get the next index to check
        int index_to_check = get_index_to_check(thread, num_threads, set_size, offset);
        // If the index is outside the bounds of the array then lets not check it
        if (index_to_check < array_length){
            // If the next index is outside the bounds of the array, then set it to maximum array size
            int next_index_to_check = get_index_to_check(thread + 1, num_threads, set_size, offset);
            if (next_index_to_check >= array_length){
                next_index_to_check = array_length - 1;
            }
            // Key lies strictly between this pivot and the next: this pivot
            // becomes the new window offset for the next round.
            if (search > arr[index_to_check].x && (search <= arr[next_index_to_check].x)) {
                ret_val[1] = index_to_check;
            }
            else if (search == arr[index_to_check].x) {
                // Exact hit: keep the smallest matching index seen so far.
                atomicMin(&ret_val[0], index_to_check);
            }
        }
        // Since this is a p-ary search divide by our total threads to get the next set size
        set_size = set_size / num_threads;
        // Sync up so no threads jump ahead and get a bad offset
        __syncthreads();
    }
}
// Block-cooperative p-ary upper-bound search: identical narrowing scheme to
// search_lower, but on an exact key match the LARGEST matching index is kept
// in ret_val[0] via atomicMax.
// Precondition: the caller initializes ret_val[0] to a small sentinel (-1)
// and ret_val[1] to 0 (see pnary_partition).
__device__ void search_upper(int search, int array_length, int2 *arr, int *ret_val ) {
    const int num_threads = blockDim.x;
    const int thread = threadIdx.x;
    int set_size = array_length;
    while(set_size != 0){
        // Get the offset of the array, initially set to 0
        int offset = ret_val[1];
        // Barrier so no thread overwrites ret_val[1] before everyone has read it
        __syncthreads();
        // Get the next index to check
        int index_to_check = get_index_to_check(thread, num_threads, set_size, offset);
        // If the index is outside the bounds of the array then lets not check it
        if (index_to_check < array_length){
            // If the next index is outside the bounds of the array, then set it to maximum array size
            int next_index_to_check = get_index_to_check(thread + 1, num_threads, set_size, offset);
            if (next_index_to_check >= array_length){
                next_index_to_check = array_length - 1;
            }
            // Key lies strictly between this pivot and the next: this pivot
            // becomes the new window offset for the next round.
            if (search > arr[index_to_check].x && (search <= arr[next_index_to_check].x)) {
                ret_val[1] = index_to_check;
            }
            else if (search == arr[index_to_check].x) {
                // Exact hit: keep the largest matching index seen so far.
                atomicMax(&ret_val[0], index_to_check);
            }
        }
        // Since this is a p-ary search divide by our total threads to get the next set size
        set_size = set_size / num_threads;
        // Sync up so no threads jump ahead and get a bad offset
        __syncthreads();
    }
}
// Stand-alone p-ary search kernel: every thread in the grid probes one pivot
// per round.  ret_val[0] receives a matching index via a plain store (if the
// key occurs more than once, whichever thread wins the race decides);
// ret_val[1] is the current window offset.  Callers are expected to
// pre-initialize ret_val[0] = -1 and ret_val[1] = 0 (see the commented-out
// initialization below).
// NOTE(review): __syncthreads() only synchronizes within a block, so with
// gridDim.x > 1 the offset handoff between rounds is not globally ordered —
// presumably intended for a single-block launch; confirm.
__global__ void p_ary_search(int search, int array_length, int2 *arr, int *ret_val ) {
    const int num_threads = blockDim.x * gridDim.x;
    const int thread = blockIdx.x * blockDim.x + threadIdx.x;
    //ret_val[0] = -1;
    //ret_val[1] = 0;
    int set_size = array_length;
    while(set_size != 0){
        // Get the offset of the array, initially set to 0
        int offset = ret_val[1];
        // Barrier so no thread overwrites ret_val[1] before everyone has read it
        __syncthreads();
        // Get the next index to check
        int index_to_check = get_index_to_check(thread, num_threads, set_size, offset);
        // If the index is outside the bounds of the array then lets not check it
        if (index_to_check < array_length){
            // If the next index is outside the bounds of the array, then set it to maximum array size
            int next_index_to_check = get_index_to_check(thread + 1, num_threads, set_size, offset);
            if (next_index_to_check >= array_length){
                next_index_to_check = array_length - 1;
            }
            // If we're at the mid section of the array reset the offset to this index
            if (search > arr[index_to_check].x && (search < arr[next_index_to_check].x)) {
                ret_val[1] = index_to_check;
            }
            else if (search == arr[index_to_check].x) {
                // Set the return var if we hit it
                ret_val[0] = index_to_check;
            }
        }
        // Since this is a p-ary search divide by our total threads to get the next set size
        set_size = set_size / num_threads;
        // Sync up so no threads jump ahead and get a bad offset
        __syncthreads();
    }
}
// Partition stage of the join.  Block b covers one blockDim.x-sized slice of
// the sorted relation rel_a; using the slice's first and last keys it runs
// cooperative p-ary searches over the sorted relation rel_b (size M) to find
// the candidate index range [lower, upper] that can possibly join.
// out_bound[b] receives an UPPER BOUND on this block's join output size
// (blockDim.x * range length); the host later exclusive-scans out_bound to
// derive each block's base offset in the padded output buffer.
// lower_array / upper_array store one {result, offset} pair per block:
// result stays at its sentinel (M resp. -1) when no exact key match exists,
// in which case the nearest-pivot offset is used instead.
__global__ void pnary_partition(int2* rel_a, int2* rel_b, int* lower_array, int* upper_array, float* out_bound, int N, int M) {
    const int lower_bound = rel_a[blockIdx.x * blockDim.x].x;
    const int upper_bound = rel_a[(blockIdx.x + 1) * blockDim.x - 1].x;
    __shared__ int lower;
    __shared__ int upper;
    // Every thread stores the same sentinel values — redundant but harmless.
    lower_array[2 * blockIdx.x] = M;
    lower_array[2 * blockIdx.x + 1] = 0;
    upper_array[2 * blockIdx.x] = -1;
    upper_array[2 * blockIdx.x + 1] = 0;
    __syncthreads();
    search_lower(lower_bound, M, rel_b, &lower_array[2 * blockIdx.x]);
    search_upper(upper_bound, M, rel_b, &upper_array[2 * blockIdx.x]);
    // Fall back to the nearest-pivot offset when no exact match was recorded.
    lower = lower_array[2 * blockIdx.x] < M? lower_array[2 * blockIdx.x]:lower_array[2 * blockIdx.x + 1];
    upper = upper_array[2 * blockIdx.x] >= 0? upper_array[2 * blockIdx.x]:upper_array[2 * blockIdx.x + 1];
    if( upper < lower) {
        upper = M - 1;
    }
    out_bound[blockIdx.x] = blockDim.x * ( upper - lower + 1);
}
// Per-block nested-loop join.  Block b joins its blockDim.x tuples of rel_a
// (staged into shared `left`) against the rel_b candidate range [lower, upper]
// computed by pnary_partition (staged into shared `right`).  Each thread first
// counts its matches; sharedMemExclusiveScan turns the counts into per-thread
// write offsets within the block's padded output region, whose base is
// out_bound[b] (already exclusively scanned on the host).  result_size[b]
// receives the number of matches actually produced by the block.
// Assumes blockDim.x == 512 (shared array sizes, count[511] / index[511]).
// NOTE(review): `right` holds only 1024 tuples but num_right is not clamped —
// if a candidate range exceeds 1024 the staging loop writes past the shared
// array; confirm ranges stay <= 1024.
// NOTE(review): rel_a[partition + threadIndex] is read unguarded — the last
// block reads out of bounds when N is not a multiple of blockDim.x; confirm
// callers use a multiple-of-512 N.
void
__global__ brute_join( int3* out, int2* rel_a, int2* rel_b, int num, int N, int M, float* out_bound, float* result_size, int* lower_array, int* upper_array ) {
    __shared__ int2 left[512];
    __shared__ int2 right[1024];
    __shared__ uint count[512];
    __shared__ uint index[512];
    __shared__ uint scratch[1024];
    int lower;
    int upper;
    int num_right;
    // Recompute the candidate range exactly as pnary_partition resolved it.
    lower = lower_array[2 * blockIdx.x] < M? lower_array[2 * blockIdx.x]:lower_array[2 * blockIdx.x + 1];
    upper = upper_array[2 * blockIdx.x] >= 0? upper_array[2 * blockIdx.x]:upper_array[2 * blockIdx.x + 1];
    if( upper < lower) {
        upper = M - 1;
    }
    num_right = upper - lower + 1;
    int threadIndex = threadIdx.x;
    int partition = blockIdx.x * blockDim.x;
    // counter for each thread
    count[threadIndex] = 0;
    index[threadIndex] = 0;
    // load two relation to the cache, make future access faster
    left[threadIndex] = rel_a[partition + threadIndex];
    for(int i = 0 ; i < num_right; i+= 512) {
        if(i + threadIndex < num_right) {
            right[i + threadIndex] = rel_b[lower + i + threadIndex];
        }
        __syncthreads();
    }
    // First pass: count matches per thread.
    for(int i = 0 ; i < num_right; i++ ) {
        if(left[threadIndex].x == right[i].x) {
            count[threadIndex] ++;
        }
    }
    __syncthreads();
    sharedMemExclusiveScan(threadIndex, count, index, scratch, SCAN_BLOCK_DIM);
    // Second pass: emit each match at base + this thread's scanned offset.
    int current = 0;
    for(int i = 0 ; i < num_right; i++ ) {
        if(left[threadIndex].x == right[i].x) {
            int j = (int)out_bound[blockIdx.x] + index[threadIndex] + current;
            if( j < num) {
                out[j].x = left[threadIndex].x;
                out[j].y = left[threadIndex].y;
                out[j].z = right[i].y;
                current++;
            }
        }
    }
    if(threadIdx.x == 0) {
        // Last thread's exclusive prefix + its own count == block total.
        result_size[blockIdx.x] = count[511] + index[511];
    }
}
// Compaction pass for the join: block b copies its result_size[b] tuples from
// the padded region of `out` (base out_bound[b]) into the dense `result`
// array (base histogram[b]), striding 512 threads at a time.
void
__global__ join_coalesced(int3* result, int3* out, float* result_size, float* histogram, float* out_bound) {
    const int tupleCount = result_size[blockIdx.x];
    const int srcBase = out_bound[blockIdx.x];   // padded per-block region in `out`
    const int dstBase = histogram[blockIdx.x];   // dense base offset in `result`
    for (int base = 0; base < tupleCount; base += 512) {
        const int k = base + threadIdx.x;
        if (k < tupleCount) {
            result[dstBase + k] = out[srcBase + k];
        }
        __syncthreads();
    }
}
/*
Implementation of JOIN operationi
rel_a: left relation
rel_b: right relation
N: size of rel_a
M: size of rel_b
*/
// Comparator ordering int2 tuples by key (.x) for thrust::sort.
// fix: thrust::sort requires a strict weak ordering; the original `a.x <= b.x`
// is reflexive (comp(a, a) == true), which violates that requirement and is
// undefined behavior for the sort.  Strict `<` yields the same order for
// distinct keys and is a valid comparator.
struct compare_int2 {
    __host__ __device__
    bool operator()(int2 a, int2 b) {
        return a.x < b.x;
    }
};
// End-to-end sort-merge-style GPU join of two randomly generated relations
// (rel_a: N tuples, rel_b: M tuples, keys in [1, 1024]).  Both relations are
// sorted by key, a sequential CPU join produces the reference answer, then
// the GPU pipeline runs: pnary_partition -> scan(out_bound) -> brute_join ->
// scan(result_size) -> join_coalesced.  The GPU result is validated against
// the CPU result and timings are printed.
void primitive_join(int N, int M) {
    // prepare host buffers
    int min = 1;
    int max = 1024;
    int2* rel_a = new int2[N];
    int2* rel_b = new int2[M];
    int3* result_seq = new int3[4 * N];
    int seq_num;
    for(int i = 0; i < N; i ++) {
        rel_a[i] = make_int2(min + (rand() % (int)(max - min + 1)), min + (rand() % (int)(max - min + 1)) );
    }
    for(int i = 0; i < M; i ++) {
        rel_b[i] = make_int2(min + (rand() % (int)(max - min + 1)), min + (rand() % (int)(max - min + 1)) );
    }
    thrust::sort(rel_a, rel_a + N, compare_int2());
    thrust::sort(rel_b, rel_b + M, compare_int2());
    sequential_join(rel_a, rel_b, N, M, result_seq, &seq_num);
    // prepare device buffers
    const int threadPerBlock = 512;
    const int blocks = (N + threadPerBlock - 1) / threadPerBlock;
    printf("num blocks: %d\n", blocks);
    int2* dev_rel_a;
    int2* dev_rel_b;
    int* lower_array;
    int* upper_array;
    float* out_bound;
    float* result_size;
    float* histogram;
    int3* out;
    int3* result;
    // output buffers are sized 4*N tuples (worst-case estimate)
    cudaMalloc((void**) &out, sizeof(int3) * N * 4);
    cudaMalloc((void**) &result, sizeof(int3) * N * 4);
    cudaMalloc((void**) &result_size, sizeof(float) * blocks);
    cudaMalloc((void**) &histogram, sizeof(float) * blocks);
    cudaMalloc((void**) &out_bound, sizeof(float) * blocks);
    cudaMalloc((void**) &lower_array, sizeof(int) * blocks * 2);
    cudaMalloc((void**) &upper_array, sizeof(int) * blocks * 2);
    cudaMalloc((void**) &dev_rel_a, sizeof(int2) * N);
    cudaMalloc((void**) &dev_rel_b, sizeof(int2) * M);
    double startTime = CycleTimer::currentSeconds();
    cudaMemcpy(dev_rel_a, rel_a, sizeof(int2) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_rel_b, rel_b, sizeof(int2) * M, cudaMemcpyHostToDevice);
    cudaPrintfInit();
    double startTime_inner = CycleTimer::currentSeconds();
    pnary_partition<<< blocks, threadPerBlock >>>(dev_rel_a, dev_rel_b, lower_array, upper_array ,out_bound, N, M);
    // out_bound becomes each block's base offset in the padded output buffer
    thrust::device_ptr<float> dev_ptr1(out_bound);
    thrust::exclusive_scan(dev_ptr1, dev_ptr1 + blocks, dev_ptr1);
    brute_join<<< blocks, threadPerBlock >>>(out, dev_rel_a, dev_rel_b, N * 4 , N, M, out_bound, result_size, lower_array, upper_array);
    // histogram becomes each block's base offset in the dense result buffer
    thrust::device_ptr<float> dev_ptr2(result_size);
    thrust::device_ptr<float> dev_ptr3(histogram);
    thrust::exclusive_scan(dev_ptr2, dev_ptr2 + blocks, dev_ptr3);
    join_coalesced<<<blocks, threadPerBlock>>>(result, out, result_size, histogram, out_bound);
    double endTime_inner = CycleTimer::currentSeconds();
    printf("time excution from cuda join kernel %.3f ms\n",1000.f * (endTime_inner - startTime_inner));
    float* p = new float[blocks];
    int numResult = 0;
    // total = last block's count + exclusive prefix sum at the last block
    cudaMemcpy(p, result_size, sizeof(float) * blocks, cudaMemcpyDeviceToHost);
    numResult += p[blocks-1];
    cudaMemcpy(p, histogram, sizeof(float) * blocks, cudaMemcpyDeviceToHost);
    numResult += p[blocks-1];
    printf("cuda produces %d tuples\n", numResult);
    int3* tmp_check = new int3[numResult];
    cudaMemcpy(tmp_check, result, sizeof(int3)*numResult, cudaMemcpyDeviceToHost);
    double endTime = CycleTimer::currentSeconds();
    printf("time excution from cuda join overall %.3f ms\n",1000.f * (endTime - startTime));
    validate_join(result_seq, seq_num, tmp_check, numResult);
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
    cudaFree(dev_rel_a);
    cudaFree(dev_rel_b);
    cudaFree(lower_array);
    cudaFree(upper_array);
    cudaFree(out_bound);
    cudaFree(result_size);
    cudaFree(out);
    cudaFree(histogram);
    cudaFree(result);
    // fix: release host allocations (the original leaked all five)
    delete[] rel_a;
    delete[] rel_b;
    delete[] result_seq;
    delete[] p;
    delete[] tmp_check;
}
#define N (1024*1024)
#define FULL_DATA_SIZE (N*20)
// Per-element work for the stream demo: three-point average of a and of b
// (neighbor indices wrap within a 256-element window), then the mean of the
// two averages is stored in c.
__global__ void kernel( int *a, int *b, int *c ) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N) return;
    int n1 = (idx + 1) % 256;
    int n2 = (idx + 2) % 256;
    float avgA = (a[idx] + a[n1] + a[n2]) / 3.0f;
    float avgB = (b[idx] + b[n1] + b[n2]) / 3.0f;
    c[idx] = (avgA + avgB) / 2;
}
// Demonstrates copy/compute overlap: FULL_DATA_SIZE ints are processed in
// 2*N-sized chunks ping-ponged across two CUDA streams; each chunk issues the
// H2D copies of a and b, the averaging kernel, and the D2H copy of c
// asynchronously from pinned host memory.  Prints elapsed wall time.
void streamTest() {
    cudaDeviceProp prop;
    int whichDevice;
    HANDLE_ERROR( cudaGetDevice( &whichDevice ) );
    HANDLE_ERROR( cudaGetDeviceProperties( &prop, whichDevice ) );
    if (!prop.deviceOverlap) {
        printf( "Device will not handle overlaps, so no speed up from streams\n" );
    }
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaStream_t stream0, stream1;
    int *host_a, *host_b, *host_c;
    int *dev_a0, *dev_b0, *dev_c0;   // stream0's working set
    int *dev_a1, *dev_b1, *dev_c1;   // stream1's working set
    // start the timers
    HANDLE_ERROR( cudaEventCreate( &start ) );
    HANDLE_ERROR( cudaEventCreate( &stop ) );
    // initialize the streams
    HANDLE_ERROR( cudaStreamCreate( &stream0 ) );
    HANDLE_ERROR( cudaStreamCreate( &stream1 ) );
    // allocate N-sized device chunks, one triple per stream
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a0, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b0, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c0, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a1, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b1, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c1, N * sizeof(int) ) );
    // pinned host memory is required for truly asynchronous cudaMemcpyAsync
    HANDLE_ERROR( cudaHostAlloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) );
    HANDLE_ERROR( cudaHostAlloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) );
    HANDLE_ERROR( cudaHostAlloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) );
    for (int i=0; i<FULL_DATA_SIZE; i++) {
        host_a[i] = rand();
        host_b[i] = rand();
    }
    HANDLE_ERROR( cudaEventRecord( start, 0 ) );
    // now loop over full data, in bite-sized chunks:
    // first N elements -> stream0, next N -> stream1
    for (int i=0; i<FULL_DATA_SIZE; i+= N*2) {
        // enqueue copies of a in stream0 and stream1
        HANDLE_ERROR( cudaMemcpyAsync( dev_a0, host_a+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) );
        HANDLE_ERROR( cudaMemcpyAsync( dev_a1, host_a+i+N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) );
        // enqueue copies of b in stream0 and stream1
        HANDLE_ERROR( cudaMemcpyAsync( dev_b0, host_b+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) );
        HANDLE_ERROR( cudaMemcpyAsync( dev_b1, host_b+i+N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) );
        // enqueue kernels in stream0 and stream1
        kernel<<<N/256,256,0,stream0>>>( dev_a0, dev_b0, dev_c0 );
        kernel<<<N/256,256,0,stream1>>>( dev_a1, dev_b1, dev_c1 );
        // enqueue copies of c from device to locked memory
        HANDLE_ERROR( cudaMemcpyAsync( host_c+i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0 ) );
        HANDLE_ERROR( cudaMemcpyAsync( host_c+i+N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1 ) );
    }
    HANDLE_ERROR( cudaStreamSynchronize( stream0 ) );
    HANDLE_ERROR( cudaStreamSynchronize( stream1 ) );
    HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
    HANDLE_ERROR( cudaEventSynchronize( stop ) );
    HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
    printf( "Time taken: %3.1f ms\n", elapsedTime );
    // cleanup the streams and memory
    HANDLE_ERROR( cudaFreeHost( host_a ) );
    HANDLE_ERROR( cudaFreeHost( host_b ) );
    HANDLE_ERROR( cudaFreeHost( host_c ) );
    HANDLE_ERROR( cudaFree( dev_a0 ) );
    HANDLE_ERROR( cudaFree( dev_b0 ) );
    HANDLE_ERROR( cudaFree( dev_c0 ) );
    HANDLE_ERROR( cudaFree( dev_a1 ) );
    HANDLE_ERROR( cudaFree( dev_b1 ) );
    HANDLE_ERROR( cudaFree( dev_c1 ) );
    HANDLE_ERROR( cudaStreamDestroy( stream0 ) );
    HANDLE_ERROR( cudaStreamDestroy( stream1 ) );
    // fix: the original never destroyed the timing events
    HANDLE_ERROR( cudaEventDestroy( start ) );
    HANDLE_ERROR( cudaEventDestroy( stop ) );
}
// Baseline for streamTest: processes the whole FULL_DATA_SIZE buffer with one
// synchronous H2D copy per input, one kernel launch, and one D2H copy — no
// overlap.  Prints elapsed wall time for comparison with the streamed path.
void sequentialTest() {
    cudaEvent_t start, stop;
    float elapsedTime;
    HANDLE_ERROR( cudaEventCreate( &start ) );
    HANDLE_ERROR( cudaEventCreate( &stop ) );
    int* host_a = new int[FULL_DATA_SIZE];
    int* host_b = new int[FULL_DATA_SIZE];
    int* host_c = new int[FULL_DATA_SIZE];
    int *dev_a, *dev_b, *dev_c;
    // allocate the memory on the GPU
    cudaMalloc( (void**)&dev_a, FULL_DATA_SIZE * sizeof(int));
    cudaMalloc( (void**)&dev_b, FULL_DATA_SIZE * sizeof(int));
    cudaMalloc( (void**)&dev_c, FULL_DATA_SIZE * sizeof(int));
    for (int i=0; i<FULL_DATA_SIZE; i++) {
        host_a[i] = rand();
        host_b[i] = rand();
    }
    HANDLE_ERROR( cudaEventRecord( start, 0 ) );
    cudaMemcpy(dev_a, host_a, sizeof(int) * FULL_DATA_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, sizeof(int) * FULL_DATA_SIZE, cudaMemcpyHostToDevice);
    kernel<<<FULL_DATA_SIZE/256,256,0>>>( dev_a, dev_b, dev_c);
    // NOTE: host_c is pageable, so this async copy degrades to a synchronous
    // one; the cudaEventSynchronize below guarantees completion either way.
    cudaMemcpyAsync( host_c, dev_c, FULL_DATA_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
    HANDLE_ERROR( cudaEventSynchronize( stop ) );
    HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
    printf( "Time taken: %3.1f ms\n", elapsedTime );
    // fix: the original leaked every allocation and both events
    HANDLE_ERROR( cudaEventDestroy( start ) );
    HANDLE_ERROR( cudaEventDestroy( stop ) );
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    delete[] host_a;
    delete[] host_b;
    delete[] host_c;
}
// Enumerates every CUDA device on the machine and prints its name, SM count,
// global memory size and compute capability.
void
printCudaInfo() {
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    printf("Found %d CUDA devices\n", deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printf("Device %d: %s\n", dev, props.name);
        printf(" SMs: %d\n", props.multiProcessorCount);
        printf(" Global mem: %.0f MB\n", static_cast<float>(props.totalGlobalMem) / (1024 * 1024));
        printf(" CUDA Cap: %d.%d\n", props.major, props.minor);
    }
}
|
d596c4b910e5587c307c58c29d615a43072557e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "async_utils.cuh"
#include "cuda_utils.h"
#include "handle_utils.h"
#include "matrix_utils.h"
#include "pinned_host_vector.h"
#include "preprocessor.h"
#include "stream_allocator.h"
#include "svm_serde.h"
#include <cuml/svm/svm_parameter.h>
#include <thrust/async/copy.h>
#include <thrust/device_vector.h>
#include <cuml/svm/svc.hpp>
#include <Rcpp.h>
#include <memory>
#include <vector>
namespace cuml4r {
namespace {
constexpr auto kSvcKernelParams = "kernel_params";
constexpr auto kSvcSvmParams = "svm_params";
constexpr auto kSvcModel = "model";
// Bundles a raft::handle_t with a trained ML::SVM::SVC<double> so the pair
// can be handed to R as one Rcpp::XPtr.  getState()/setState() (de)serialize
// the kernel params, SVM params and fitted model for R-level save/restore.
class ModelCtx {
 public:
  using model_t = ML::SVM::SVC<double>;
  // model object must be destroyed first: members are destroyed in reverse
  // declaration order, so model_ (declared after handle_) goes first.
  std::unique_ptr<raft::handle_t> const handle_;
  std::unique_ptr<model_t> const model_;
  __host__ ModelCtx(std::unique_ptr<raft::handle_t> handle,
                    std::unique_ptr<model_t> model) noexcept
    : handle_(std::move(handle)), model_(std::move(model)) {}
  // Serializes kernel params, SVM params and the fitted model into an R list
  // keyed by kSvcKernelParams / kSvcSvmParams / kSvcModel.
  __host__ Rcpp::List getState() const {
    Rcpp::List state;
    state[kSvcKernelParams] = detail::getState(model_->kernel_params);
    state[kSvcSvmParams] = detail::getState(model_->param);
    state[kSvcModel] =
      detail::getState(/*svm_model=*/model_->model, /*handle=*/*handle_);
    return state;
  }
  // Restores everything previously captured by getState().
  __host__ void setState(Rcpp::List const& state) {
    detail::setState(/*kernel_params=*/model_->kernel_params,
                     /*state=*/state[kSvcKernelParams]);
    detail::setState(/*svm_params=*/model_->param,
                     /*state=*/state[kSvcSvmParams]);
    detail::setState(/*svm_model=*/model_->model, /*handle=*/*handle_,
                     /*state=*/state[kSvcModel]);
  }
};
} // namespace
// Fits a double-precision SVC on the GPU and returns it to R as an external
// pointer wrapping {handle, model}.
// - `input` is copied with transpose=true, so m.values holds the matrix with
//   numRows == n_features and numCols == n_samples.
// - labels and (optional) sample_weights are staged through pinned host
//   vectors and copied host->device asynchronously on the handle's stream;
//   the CUML4R_ANONYMOUS_VARIABLE markers keep those transfers alive until
//   destruction, and the final stream synchronize fences everything.
// - `kernel` is cast to MLCommon's KernelType enum; `cost` maps to the SVM C
//   parameter.
__host__ SEXP svc_fit(Rcpp::NumericMatrix const& input,
                      Rcpp::NumericVector const& labels, double const cost,
                      int const kernel, double const gamma, double const coef0,
                      int const degree, double const tol, int const max_iter,
                      int const nochange_steps, double const cache_size,
                      Rcpp::NumericVector const& sample_weights,
                      int const verbosity) {
  auto const m = cuml4r::Matrix<>(input, /*transpose=*/true);
  auto const n_samples = m.numCols;
  auto const n_features = m.numRows;
  auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
  auto handle = std::make_unique<raft::handle_t>();
  cuml4r::handle_utils::initializeHandle(*handle, stream_view.value());
  // SVM input
  auto const& h_input = m.values;
  thrust::device_vector<double> d_input(h_input.size());
  auto CUML4R_ANONYMOUS_VARIABLE(input_h2d) = cuml4r::async_copy(
    stream_view.value(), h_input.cbegin(), h_input.cend(), d_input.begin());
  auto h_labels(Rcpp::as<cuml4r::pinned_host_vector<double>>(labels));
  thrust::device_vector<double> d_labels(h_labels.size());
  auto CUML4R_ANONYMOUS_VARIABLE(labels_h2d) = cuml4r::async_copy(
    stream_view.value(), h_labels.cbegin(), h_labels.cend(), d_labels.begin());
  // Sample weights are optional: an empty vector means unweighted fitting.
  thrust::device_vector<double> d_sample_weights;
  cuml4r::unique_marker sample_weights_h2d;
  if (sample_weights.size() > 0) {
    auto const h_sample_weights(
      Rcpp::as<cuml4r::pinned_host_vector<double>>(sample_weights));
    d_sample_weights.resize(h_sample_weights.size());
    sample_weights_h2d =
      cuml4r::async_copy(stream_view.value(), h_sample_weights.cbegin(),
                         h_sample_weights.cend(), d_sample_weights.begin());
  }
  MLCommon::Matrix::KernelParams kernel_params{
    /*kernel=*/static_cast<MLCommon::Matrix::KernelType>(kernel), degree, gamma,
    coef0};
  // SVM output
  auto svc = std::make_unique<ML::SVM::SVC<double>>(
    *handle, /*C=*/cost, tol, kernel_params, cache_size, max_iter,
    nochange_steps, verbosity);
  svc->fit(d_input.data().get(), /*nrows=*/n_samples, /*ncols=*/n_features,
           d_labels.data().get(),
           d_sample_weights.empty() ? nullptr : d_sample_weights.data().get());
  // Fence all async copies/kernels before handing the model back to R.
  CUDA_RT_CALL(hipStreamSynchronize(stream_view.value()));
  return Rcpp::XPtr<ModelCtx>(new ModelCtx(std::move(handle), std::move(svc)));
}
// Predicts with a previously fitted SVC held in `model_xptr`.
// When predict_class is true the model's class labels are returned;
// otherwise raw decision-function values are computed via ML::SVM::svcPredict
// with predict_class=false.  `input` is transposed into feature-major layout
// to match the fit-time convention.
__host__ SEXP svc_predict(SEXP model_xptr, Rcpp::NumericMatrix const& input,
                          bool predict_class) {
  auto const m = cuml4r::Matrix<>(input, /*transpose=*/true);
  int const n_samples = m.numCols;
  int const n_features = m.numRows;
  auto ctx = Rcpp::XPtr<ModelCtx>(model_xptr);
  auto const& svc = ctx->model_;
  auto* stream = ctx->handle_->get_stream();
  // input
  auto const& h_input = m.values;
  thrust::device_vector<double> d_input(h_input.size());
  auto CUML4R_ANONYMOUS_VARIABLE(input_h2d) = cuml4r::async_copy(
    stream, h_input.cbegin(), h_input.cend(), d_input.begin());
  // output: one prediction per sample
  thrust::device_vector<double> d_preds(n_samples);
  if (predict_class) {
    svc->predict(/*input=*/d_input.data().get(), /*n_rows=*/n_samples,
                 /*c_cols=*/n_features, /*preds=*/d_preds.data().get());
  } else {
    ML::SVM::svcPredict(
      /*handle=*/*ctx->handle_, /*input=*/d_input.data().get(),
      /*n_rows=*/n_samples,
      /*c_cols=*/n_features, /*kernel_parames=*/svc->kernel_params,
      /*model=*/svc->model, /*preds=*/d_preds.data().get(),
      /*buffer_size=*/svc->param.cache_size, /*predict_class=*/false);
  }
  cuml4r::pinned_host_vector<double> h_preds(n_samples);
  auto CUML4R_ANONYMOUS_VARIABLE(preds_d2h) = cuml4r::async_copy(
    stream, d_preds.cbegin(), d_preds.cend(), h_preds.begin());
  // Fence the async D2H copy before materializing the R vector.
  CUDA_RT_CALL(hipStreamSynchronize(stream));
  return Rcpp::NumericVector(h_preds.begin(), h_preds.end());
}
// Serializes the SVC model context held by `model` into an R list.
__host__ Rcpp::List svc_get_state(SEXP model) {
  auto const ctx = Rcpp::XPtr<ModelCtx>(model);
  return ctx->getState();
}
// Rebuilds an SVC model context from a list previously produced by
// svc_get_state and returns it to R as an external pointer.
__host__ SEXP svc_set_state(Rcpp::List const& state) {
  auto stream = cuml4r::stream_allocator::getOrCreateStream();
  auto raft_handle = std::make_unique<raft::handle_t>();
  cuml4r::handle_utils::initializeHandle(*raft_handle, stream.value());
  auto svc_model = std::make_unique<ML::SVM::SVC<double>>(*raft_handle);
  auto ctx = std::make_unique<ModelCtx>(
    /*handle=*/std::move(raft_handle), /*model=*/std::move(svc_model));
  ctx->setState(state);
  return Rcpp::XPtr<ModelCtx>(ctx.release());
}
} // namespace cuml4r
| d596c4b910e5587c307c58c29d615a43072557e2.cu | #include "async_utils.cuh"
#include "cuda_utils.h"
#include "handle_utils.h"
#include "matrix_utils.h"
#include "pinned_host_vector.h"
#include "preprocessor.h"
#include "stream_allocator.h"
#include "svm_serde.h"
#include <cuml/svm/svm_parameter.h>
#include <thrust/async/copy.h>
#include <thrust/device_vector.h>
#include <cuml/svm/svc.hpp>
#include <Rcpp.h>
#include <memory>
#include <vector>
namespace cuml4r {
namespace {
constexpr auto kSvcKernelParams = "kernel_params";
constexpr auto kSvcSvmParams = "svm_params";
constexpr auto kSvcModel = "model";
// Bundles a raft::handle_t with a trained ML::SVM::SVC<double> so the pair
// can be handed to R as one Rcpp::XPtr.  getState()/setState() (de)serialize
// the kernel params, SVM params and fitted model for R-level save/restore.
class ModelCtx {
 public:
  using model_t = ML::SVM::SVC<double>;
  // model object must be destroyed first: members are destroyed in reverse
  // declaration order, so model_ (declared after handle_) goes first.
  std::unique_ptr<raft::handle_t> const handle_;
  std::unique_ptr<model_t> const model_;
  __host__ ModelCtx(std::unique_ptr<raft::handle_t> handle,
                    std::unique_ptr<model_t> model) noexcept
    : handle_(std::move(handle)), model_(std::move(model)) {}
  // Serializes kernel params, SVM params and the fitted model into an R list
  // keyed by kSvcKernelParams / kSvcSvmParams / kSvcModel.
  __host__ Rcpp::List getState() const {
    Rcpp::List state;
    state[kSvcKernelParams] = detail::getState(model_->kernel_params);
    state[kSvcSvmParams] = detail::getState(model_->param);
    state[kSvcModel] =
      detail::getState(/*svm_model=*/model_->model, /*handle=*/*handle_);
    return state;
  }
  // Restores everything previously captured by getState().
  __host__ void setState(Rcpp::List const& state) {
    detail::setState(/*kernel_params=*/model_->kernel_params,
                     /*state=*/state[kSvcKernelParams]);
    detail::setState(/*svm_params=*/model_->param,
                     /*state=*/state[kSvcSvmParams]);
    detail::setState(/*svm_model=*/model_->model, /*handle=*/*handle_,
                     /*state=*/state[kSvcModel]);
  }
};
} // namespace
// Fits a double-precision SVC on the GPU and returns it to R as an external
// pointer wrapping {handle, model}.
// - `input` is copied with transpose=true, so m.values holds the matrix with
//   numRows == n_features and numCols == n_samples.
// - labels and (optional) sample_weights are staged through pinned host
//   vectors and copied host->device asynchronously on the handle's stream;
//   the CUML4R_ANONYMOUS_VARIABLE markers keep those transfers alive until
//   destruction, and the final stream synchronize fences everything.
// - `kernel` is cast to MLCommon's KernelType enum; `cost` maps to the SVM C
//   parameter.
__host__ SEXP svc_fit(Rcpp::NumericMatrix const& input,
                      Rcpp::NumericVector const& labels, double const cost,
                      int const kernel, double const gamma, double const coef0,
                      int const degree, double const tol, int const max_iter,
                      int const nochange_steps, double const cache_size,
                      Rcpp::NumericVector const& sample_weights,
                      int const verbosity) {
  auto const m = cuml4r::Matrix<>(input, /*transpose=*/true);
  auto const n_samples = m.numCols;
  auto const n_features = m.numRows;
  auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
  auto handle = std::make_unique<raft::handle_t>();
  cuml4r::handle_utils::initializeHandle(*handle, stream_view.value());
  // SVM input
  auto const& h_input = m.values;
  thrust::device_vector<double> d_input(h_input.size());
  auto CUML4R_ANONYMOUS_VARIABLE(input_h2d) = cuml4r::async_copy(
    stream_view.value(), h_input.cbegin(), h_input.cend(), d_input.begin());
  auto h_labels(Rcpp::as<cuml4r::pinned_host_vector<double>>(labels));
  thrust::device_vector<double> d_labels(h_labels.size());
  auto CUML4R_ANONYMOUS_VARIABLE(labels_h2d) = cuml4r::async_copy(
    stream_view.value(), h_labels.cbegin(), h_labels.cend(), d_labels.begin());
  // Sample weights are optional: an empty vector means unweighted fitting.
  thrust::device_vector<double> d_sample_weights;
  cuml4r::unique_marker sample_weights_h2d;
  if (sample_weights.size() > 0) {
    auto const h_sample_weights(
      Rcpp::as<cuml4r::pinned_host_vector<double>>(sample_weights));
    d_sample_weights.resize(h_sample_weights.size());
    sample_weights_h2d =
      cuml4r::async_copy(stream_view.value(), h_sample_weights.cbegin(),
                         h_sample_weights.cend(), d_sample_weights.begin());
  }
  MLCommon::Matrix::KernelParams kernel_params{
    /*kernel=*/static_cast<MLCommon::Matrix::KernelType>(kernel), degree, gamma,
    coef0};
  // SVM output
  auto svc = std::make_unique<ML::SVM::SVC<double>>(
    *handle, /*C=*/cost, tol, kernel_params, cache_size, max_iter,
    nochange_steps, verbosity);
  svc->fit(d_input.data().get(), /*nrows=*/n_samples, /*ncols=*/n_features,
           d_labels.data().get(),
           d_sample_weights.empty() ? nullptr : d_sample_weights.data().get());
  // Fence all async copies/kernels before handing the model back to R.
  CUDA_RT_CALL(cudaStreamSynchronize(stream_view.value()));
  return Rcpp::XPtr<ModelCtx>(new ModelCtx(std::move(handle), std::move(svc)));
}
// Predicts with a previously fitted SVC held in `model_xptr`.
// When predict_class is true the model's class labels are returned;
// otherwise raw decision-function values are computed via ML::SVM::svcPredict
// with predict_class=false.  `input` is transposed into feature-major layout
// to match the fit-time convention.
__host__ SEXP svc_predict(SEXP model_xptr, Rcpp::NumericMatrix const& input,
                          bool predict_class) {
  auto const m = cuml4r::Matrix<>(input, /*transpose=*/true);
  int const n_samples = m.numCols;
  int const n_features = m.numRows;
  auto ctx = Rcpp::XPtr<ModelCtx>(model_xptr);
  auto const& svc = ctx->model_;
  auto* stream = ctx->handle_->get_stream();
  // input
  auto const& h_input = m.values;
  thrust::device_vector<double> d_input(h_input.size());
  auto CUML4R_ANONYMOUS_VARIABLE(input_h2d) = cuml4r::async_copy(
    stream, h_input.cbegin(), h_input.cend(), d_input.begin());
  // output: one prediction per sample
  thrust::device_vector<double> d_preds(n_samples);
  if (predict_class) {
    svc->predict(/*input=*/d_input.data().get(), /*n_rows=*/n_samples,
                 /*c_cols=*/n_features, /*preds=*/d_preds.data().get());
  } else {
    ML::SVM::svcPredict(
      /*handle=*/*ctx->handle_, /*input=*/d_input.data().get(),
      /*n_rows=*/n_samples,
      /*c_cols=*/n_features, /*kernel_parames=*/svc->kernel_params,
      /*model=*/svc->model, /*preds=*/d_preds.data().get(),
      /*buffer_size=*/svc->param.cache_size, /*predict_class=*/false);
  }
  cuml4r::pinned_host_vector<double> h_preds(n_samples);
  auto CUML4R_ANONYMOUS_VARIABLE(preds_d2h) = cuml4r::async_copy(
    stream, d_preds.cbegin(), d_preds.cend(), h_preds.begin());
  // Fence the async D2H copy before materializing the R vector.
  CUDA_RT_CALL(cudaStreamSynchronize(stream));
  return Rcpp::NumericVector(h_preds.begin(), h_preds.end());
}
// Serializes the SVC model context held by `model` into an R list.
__host__ Rcpp::List svc_get_state(SEXP model) {
  auto const ctx = Rcpp::XPtr<ModelCtx>(model);
  return ctx->getState();
}
// Rebuilds an SVC model context from a list previously produced by
// svc_get_state and returns it to R as an external pointer.
__host__ SEXP svc_set_state(Rcpp::List const& state) {
  auto stream = cuml4r::stream_allocator::getOrCreateStream();
  auto raft_handle = std::make_unique<raft::handle_t>();
  cuml4r::handle_utils::initializeHandle(*raft_handle, stream.value());
  auto svc_model = std::make_unique<ML::SVM::SVC<double>>(*raft_handle);
  auto ctx = std::make_unique<ModelCtx>(
    /*handle=*/std::move(raft_handle), /*model=*/std::move(svc_model));
  ctx->setState(state);
  return Rcpp::XPtr<ModelCtx>(ctx.release());
}
} // namespace cuml4r
|
d2d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "cuda_util.h"
int main() {
const int count = 1024 * 1024;
int *first = NULL, *second = NULL;
int device_count;
hipGetDeviceCount(&device_count);
printf("device count: %d\n", device_count);
{
CUDACHECK(hipSetDevice(0));
CUDACHECK(hipMalloc((void**)&first, sizeof(int) * count));
CUDACHECK(hipMemset(first, 0, sizeof(int) * count));
}
{
CUDACHECK(hipSetDevice(1));
CUDACHECK(hipMalloc((void**)&second, sizeof(int) * count));
CUDACHECK(hipMemset(second, 1, sizeof(int) * count));
}
hipMemcpy(first, second, sizeof(int) * count, hipMemcpyDeviceToDevice);
{
CUDACHECK(hipSetDevice(1));
int *data = (int*)malloc(sizeof(int) * count);
memset(data, 0, sizeof(int) * count);
CUDACHECK(hipMemcpy(data, first, sizeof(int) * count, hipMemcpyDeviceToHost));
printf("data: %d\n", data[0]);
free(data);
}
hipFree(first);
hipFree(second);
return 0;
}
| d2d.cu | #include <stdio.h>
#include "cuda_runtime.h"
#include "cuda_util.h"
int main() {
const int count = 1024 * 1024;
int *first = NULL, *second = NULL;
int device_count;
cudaGetDeviceCount(&device_count);
printf("device count: %d\n", device_count);
{
CUDACHECK(cudaSetDevice(0));
CUDACHECK(cudaMalloc((void**)&first, sizeof(int) * count));
CUDACHECK(cudaMemset(first, 0, sizeof(int) * count));
}
{
CUDACHECK(cudaSetDevice(1));
CUDACHECK(cudaMalloc((void**)&second, sizeof(int) * count));
CUDACHECK(cudaMemset(second, 1, sizeof(int) * count));
}
cudaMemcpy(first, second, sizeof(int) * count, cudaMemcpyDeviceToDevice);
{
CUDACHECK(cudaSetDevice(1));
int *data = (int*)malloc(sizeof(int) * count);
memset(data, 0, sizeof(int) * count);
CUDACHECK(cudaMemcpy(data, first, sizeof(int) * count, cudaMemcpyDeviceToHost));
printf("data: %d\n", data[0]);
free(data);
}
cudaFree(first);
cudaFree(second);
return 0;
}
|
8f826c8ab94d95964ee25b60b6c576c6ceecc099.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_fdim.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_fdim), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_fdim), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_fdim), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8f826c8ab94d95964ee25b60b6c576c6ceecc099.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_fdim.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_fdim<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_fdim<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_fdim<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e7e5849c14e4f39c8eb5bee611aa2dcf4c25ec39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GPUrender.cuh"
// 20, 12
//gpu
__constant__ int gResolution;
__constant__ int gVolumeSize[3];
__constant__ float gBlockSize[3];//volume 8 x, y, z
__constant__ float gEye[3];
__constant__ float gDir[3];
__constant__ float gCross[3];
__constant__ float gU[3];
__constant__ float gL[3];
__constant__ int gValidDir[3];
__constant__ bool emptyBlock[32*32*29];
//__constant__ bool emptyBlock[*40*40]; // 40 ?
hipArray* hipArray = {0};
GLuint pbo = 0; // EyeBody pixel buffer object
struct cudaGraphicsResource *cuda_pbo_resource;
//gpu
texture<unsigned char, 3, hipReadModeNormalizedFloat> texPtr;
__inline__ __host__ __device__ void vec_add(float a[3], float b[3], float c[3]){
c[0] = a[0] + b[0];
c[1] = a[1] + b[1];
c[2] = a[2] + b[2];
}
__inline__ __host__ __device__ void vec_sub(float a[3], float b[3], float c[3]){
c[0] = a[0] - b[0];
c[1] = a[1] - b[1];
c[2] = a[2] - b[2];
}
__inline__ __host__ __device__ void s_product(float a[3], float size, float b[3]){
b[0] = a[0] * size;
b[1] = a[1] * size;
b[2] = a[2] * size;
}
__inline__ __host__ __device__ void cross_product(float a[3], float b[3], float c[3])
{
c[0] = a[1]*b[2] - a[2]*b[1];
c[1] = a[2]*b[0] - a[0]*b[2];
c[2] = a[0]*b[1] - a[1]*b[0];
}
__inline__ __host__ __device__ float vec_lenth(float a[3]){
return sqrtf(a[0]*a[0] + a[1]*a[1] + a[2]*a[2]);
}
__inline__ __host__ __device__ float inner_product(float a[3], float b[3]){
float buf = a[0]*b[0];
buf += a[1]*b[1];
buf += a[2]*b[2];
return buf;
}
__inline__ __device__ void getNormal(float pos[3], float N[3]){
N[0] = (tex3D(texPtr, pos[0]+1, pos[1], pos[2]) - tex3D(texPtr, pos[0]-1, pos[1], pos[2]))/2.0f;
N[1] = (tex3D(texPtr, pos[0], pos[1]+1, pos[2]) - tex3D(texPtr, pos[0], pos[1]-1, pos[2]))/2.0f;
N[2] = (tex3D(texPtr, pos[0], pos[1], pos[2]+1) - tex3D(texPtr, pos[0], pos[1], pos[2]-1))/2.0f;
float len = vec_lenth(N);
if(len != 0)//0
s_product(N, 1/vec_lenth(N), N); //
}
__inline__ __device__ float sign(float a){
if(a > 0)
return 1.0f;
if(a < 0)
return -1.0f;
return 0.0f;
}
//parallel
__inline__ __device__ bool IsIntersectRayBox1(float& startT, float& endT, float pos[3], int tx, int ty){
float buf[3];
float start[3];
float dx[3], dy[3];
float delta[3];
float Max[3], Min[3];//x, y, z ,
int j = 0;
s_product(gCross, tx-gResolution*0.5f, dx);//x
s_product(gU, ty-gResolution*0.5f, dy);//y
vec_add(dx, dy, delta);//x+y = point
vec_add(gEye, delta, start);//start+eye = start <-
for(int i = 0; i < 3; i++){
float a, b;
if(gValidDir[i] == 1){
a = (gVolumeSize[i]-1 - start[i])/ gDir[i];
b = (0.0f - start[i])/ gDir[i];
if(a > b){//
Max[j] = a;
Min[j] = b;
}
else{
Max[j] = b;
Min[j] = a;
}
j++;
}
}
startT = Min[0];
for(int i = 1; i < j; i++){//Min Max . = startT .
if(startT < Min[i])
startT = Min[i];
}
startT += 0.001f;
endT = Max[0];
for(int i = 1; i < j; i++){//Max Min . - endT .
if(endT > Max[i])
endT = Max[i];
}
endT -= 0.001f;
// .
s_product(gDir, startT, buf);
vec_add(start, buf, pos);
//
float maxBox[3] = {gVolumeSize[0]-1, gVolumeSize[1]-1, gVolumeSize[2]-1};
float minBox[3] = {0.0f, 0.0f, 0.0f};
float result1[3];
float result2[3];
for(int i = 0; i < 3; i++){
result1[i] = sign(minBox[i]- pos[i]);
result2[i] = sign(pos[i] - maxBox[i]);
}
float k = inner_product(result1, result2);
if(k == 3.0f)
return true;
return false;
}
__inline__ __device__ int EmptySpaceLeap1(float pos[3]){
// .
int dt = 0;
float currentBox[3] = {floorf(pos[0]*0.125f), floorf(pos[1]*0.125f), floorf(pos[2]*0.125f)};
float currentBoxId = currentBox[0]+ currentBox[1]*gBlockSize[0] + currentBox[2]*gBlockSize[0]*gBlockSize[1];
if(emptyBlock[(int)currentBoxId]){
while(true){
dt++;
vec_add(pos, gDir, pos);
float forwardBox[3] = {floorf(pos[0]*0.125f), floorf(pos[1]*0.125f), floorf(pos[2]*0.125f)};
float forwardBoxId = forwardBox[0]+ forwardBox[1]*gBlockSize[0] + forwardBox[2]*gBlockSize[0]*gBlockSize[1];
// .
if(currentBoxId != forwardBoxId)
return dt;
}
}
//
return dt;
}
__inline__ __device__ float AlphaBlending1(float4* PIT, float pos[3], float3& cAcc, const float aOld){
unsigned char nowData = (unsigned char)(tex3D(texPtr, pos[0], pos[1], pos[2])*255.0f);
unsigned char nextData = (unsigned char)(tex3D(texPtr, pos[0]+gDir[0], pos[1]+gDir[1], pos[2]+gDir[2])*255.0f);
float N[3];//
getNormal(pos, N);// .
float NL = fabs(inner_product(N, gL));//N L -
float NH = fabs(pow(inner_product(N, gDir), 16));
float light = 0.2f + 0.7f*NL + 0.1f*NH;
if(light > 1.0f)
light = 1.0f;
int index = nowData*256 + nextData;
light *= 1.0f-aOld;
float alpha = PIT[index].w;
cAcc.x += PIT[index].x*light;
cAcc.y += PIT[index].y*light;
cAcc.z += PIT[index].z*light;
return 1.0f-(1.0f-aOld) * (1.0f-alpha);//
}
__inline__ __device__ float3 RayTracing1(float4* PIT, float start[3], const float startT, const float endT){
float pos[3] = {start[0], start[1], start[2]};
float aNew = 0.0f;
float aOld = 0.0f;
float3 cAcc = {0};
for(float t = startT; t <= endT; t+=1.0f){
int dt = EmptySpaceLeap1(pos);
//dt
if(dt){//
t+=dt-1.0f;
continue;
}
aNew = AlphaBlending1(PIT, pos, cAcc, aOld);
//Early Ray Termination
if(aNew > 0.99f)
break;
aOld = aNew;
vec_add(pos, gDir, pos);
}
return cAcc;
}
__global__ void G_Parallel(unsigned char* tex, float4* PIT){
//const int tx = blockDim.x*blockIdx.x + threadIdx.x;// x
//const int ty = blockDim.y*blockIdx.y + threadIdx.y;// y
//const int locTexture = ty*256 + tx;//
const int locTexture = blockDim.x*blockIdx.x + threadIdx.x;
const int ty = locTexture/gResolution;
const int tx = locTexture%gResolution;
float pos[3];
float startT, endT;
//IsIntersectRayBox T T .
if(!IsIntersectRayBox1(startT, endT, pos, tx, ty)){
tex[locTexture*3] = 0;
tex[locTexture*3 + 1] = 0;
tex[locTexture*3 + 2] = 0;
return;//
}
float3 cAcc = RayTracing1(PIT, pos, startT, endT);
tex[locTexture*3] = (unsigned char)(cAcc.x*255.0f);
tex[locTexture*3 + 1] = (unsigned char)(cAcc.y*255.0f);
tex[locTexture*3 + 2] = (unsigned char)(cAcc.z*255.0f);
}
//parallel
//perspective
__inline__ __device__ bool IsIntersectRayBox2(float& startT, float& endT, float pos[3], float dir[3], int tx, int ty){
float buf[3];
float f[3];
float cameraCenter[3];
float start[3];
float Max[3], Min[3];//x, y, z ,
float dx[3], dy[3];
float delta[3];
int j = 0;
s_product(gCross, tx-gResolution*0.5f, dx);//x
s_product(dx, 0.005f, dx);
s_product(gU, ty-gResolution*0.5f, dy);//y
s_product(dy, 0.005f, dy);
vec_add(dx, dy, delta);//dx+dy = delta
s_product(gDir, 1.0f, f);
vec_add(gEye, f, cameraCenter);
vec_add(cameraCenter, delta, start);
vec_sub(start, gEye, dir);// dir .
s_product(dir, 1/vec_lenth(dir), dir);
for(int i = 0; i < 3; i++){
float a, b;
if(gValidDir[i] == 1){
a = (gVolumeSize[i]-1 - start[i])/ dir[i];
b = (0.0f - start[i])/ dir[i];
if(a > b){//
Max[j] = a;
Min[j] = b;
}
else{
Max[j] = b;
Min[j] = a;
}
j++;
}
}
endT = Max[0];
for(int i = 1; i < j; i++){//Max Min . - endT .
if(endT > Max[i])
endT = Max[i];
}
endT -= 0.001f;
//
float maxBox[3] = {gVolumeSize[0], gVolumeSize[1], gVolumeSize[2]};
float minBox[3] = {0.0f, 0.0f, 0.0f};
float result1[3];
float result2[3];
float k;
for(int i = 0; i < 3; i++){
result1[i] = sign(minBox[i]- start[i]);
result2[i] = sign(start[i] - maxBox[i]);
}
k = inner_product(result1, result2);
//start .
if(k == 3.0f){//
pos[0] = start[0];
pos[1] = start[1];
pos[2] = start[2];
startT = 0.0f;
return true;
}
// .
startT = Min[0];
for(int i = 1; i < j; i++){//Min Max . = startT .
if(startT < Min[i])
startT = Min[i];
}
startT += 0.001f;
//
s_product(dir, startT, buf);
vec_add(start, buf, pos);
for(int i = 0; i < 3; i++){
result1[i] = sign(minBox[i]- pos[i]);
result2[i] = sign(pos[i] - maxBox[i]);
}
k = inner_product(result1, result2);
if(k == 3.0f)//
return true;
return false;
}
__inline__ __device__ int EmptySpaceLeap2(float pos[3], float dir[3]){
// .
int dt = 0;
float currentBox[3] = {floorf(pos[0]*0.125f), floorf(pos[1]*0.125f), floorf(pos[2]*0.125f)};
float currentBoxId = currentBox[0]+ currentBox[1]*gBlockSize[0] + currentBox[2]*gBlockSize[0]*gBlockSize[1];
if(emptyBlock[(int)currentBoxId]){
while(true){
dt++;
vec_add(pos, dir, pos);
float forwardBox[3] = {floorf(pos[0]*0.125f), floorf(pos[1]*0.125f), floorf(pos[2]*0.125f)};
float forwardBoxId = forwardBox[0]+ forwardBox[1]*gBlockSize[0] + forwardBox[2]*gBlockSize[0]*gBlockSize[2];
// .
if(currentBoxId != forwardBoxId)
break;
}
}
//
return dt;
}
__inline__ __device__ float AlphaBlending2(float4* PIT, float pos[3], float dir[3], float3& cAcc, const float aOld){
unsigned char nowData = (unsigned char)(tex3D(texPtr, pos[0], pos[1], pos[2])*255.0f);
unsigned char nextData = (unsigned char)(tex3D(texPtr, pos[0]+dir[0], pos[1]+dir[1], pos[2]+dir[2])*255.0f);
float N[3];//
getNormal(pos, N);// .
float NL = fabs(inner_product(N, gL));//N L -
float NH = fabs(pow(inner_product(N, gDir), 16));
float light = 0.2f + 0.7f*NL + 0.1f*NH;
if(light > 1.0f)
light = 1.0f;
int index = nowData*256 + nextData;
light *= 1.0f-aOld;
float alpha = PIT[index].w;
cAcc.x += PIT[index].x*light;
cAcc.y += PIT[index].y*light;
cAcc.z += PIT[index].z*light;
return 1.0f-(1.0f-aOld) * (1.0f-alpha);//
}
__inline__ __device__ float3 RayTracing2(float4* PIT, float start[3], float dir[3], const float startT, const float endT){
float pos[3] = {start[0], start[1], start[2]};
float aNew = 0.0f;
float aOld = 0.0f;
float3 cAcc = {0};
for(float t = startT; t <= endT; t+=1.0f){
int dt = EmptySpaceLeap2(pos, dir);
//
if(dt){
t+=dt-1.0f;
continue;
}
//
aNew = AlphaBlending2(PIT, pos, dir, cAcc, aOld);
//Early Ray Termination
if(aNew > 0.99f)
break;
aOld = aNew;
vec_add(pos, dir, pos);
}
return cAcc;
}
__global__ void G_Perspective(unsigned char* tex, float4* PIT){
const int locTexture = blockDim.x*blockIdx.x + threadIdx.x;
const int ty = locTexture/gResolution;
const int tx = locTexture%gResolution;
float pos[3];
float dir[3];
float startT, endT;
//IsIntersectRayBox T T .
if(!IsIntersectRayBox2(startT, endT, pos, dir, tx, ty)){
tex[locTexture*3] = 0;
tex[locTexture*3 + 1] = 0;
tex[locTexture*3 + 2] = 0;
return;//
}
float3 cAcc = RayTracing2(PIT, pos, dir, startT, endT);
tex[locTexture*3] = (unsigned char)(cAcc.x*255.0f);
tex[locTexture*3 + 1] = (unsigned char)(cAcc.y*255.0f);
tex[locTexture*3 + 2] = (unsigned char)(cAcc.z*255.0f);
}
//perspective
__global__ void ChangeAlpha(float* alphaTable, int* transparentTable, int* aSAT){
int i = threadIdx.x;
if(alphaTable[i] == 0)
transparentTable[i] = 0;
else
transparentTable[i] = 1;
__syncthreads();
for(int j = i; j < 256; j++)
atomicAdd(&aSAT[j+1], transparentTable[i]);
if(i == 0)
aSAT[0] = 0;
}
__global__ void InitMinMaxEmptyBlock(unsigned char* emptyBlockMax, unsigned char* emptyBlockMin){
int i = blockDim.x*blockIdx.x + threadIdx.x;
int bz = i/((int)gBlockSize[0]*(int)gBlockSize[1]);
int by = (i%((int)gBlockSize[0]*(int)gBlockSize[1]))/gBlockSize[0];
int bx = i%(int)gBlockSize[1];
float m = 1.0f;
float M = 0.0f;
int vz, vy, vx;
for(vz = bz*8; vz <= bz*8 + 8; vz++){
if(vz > gVolumeSize[2]-1)
break;
for(vy = by*8; vy <= by*8 + 8; vy++){
for(vx = bx*8; vx <= bx*8 + 8; vx++){
float data = tex3D(texPtr, vx, vy, vz);
M = max(M, data);
m = min(m, data);
}
}
}
emptyBlockMin[i] = (unsigned char)(m*255);
emptyBlockMax[i] = (unsigned char)(M*255);
}
__global__ void InitEmptyBlock(bool* emptyBlock, unsigned char* emptyBlockMax, unsigned char* emptyBlockMin, int* aSAT){
int i = blockDim.x*blockIdx.x + threadIdx.x;
int *pSAT = &(aSAT[1]);
emptyBlock[i] = (pSAT[emptyBlockMax[i]] == pSAT[emptyBlockMin[i]-1]) ? true : false;
}
__global__ void InitPreIntegration(float4* pit, float* alphaTable, float3* colorTable){
int i = threadIdx.x;
if(i == 255){
for(int j = 0; j < 256; j++){
float A = alphaTable[j];
int index = j*256+j;
pit[index].w = A;
pit[index].x = colorTable[j].x * A;
pit[index].y = colorTable[j].y * A;
pit[index].z = colorTable[j].z * A;
}
return;
}
int k = 255 - i;
float samplingTable[256];
for(int j = 0; j < 256; j++){
float A = alphaTable[j];//+j alpha . s == e
A = 1.0f - pow(1.0f - A, 1.0f/k);
samplingTable[j] = A;
}
int e = 255-i;
int s = 0;
for( ; s <= i; s++,e++){
float A = 0, aOld = 0, aNew = 0;
float3 cAcc = {0};
// /
for(int j = s; j < e; j++){
float k = 1.0f - aOld;
A = samplingTable[j];
aNew = 1.0f - k*(1.0f - A);
cAcc.x += k*colorTable[j].x*A;
cAcc.y += k*colorTable[j].y*A;
cAcc.z += k*colorTable[j].z*A;
if(aNew > 0.99f)
break;
aOld = aNew;
}
int index = s*256 + e;
pit[index].x = cAcc.x;
pit[index].y = cAcc.y;
pit[index].z = cAcc.z;
pit[index].w = aNew;
// /
aOld = 0, aNew = 0;
cAcc.x = cAcc.y = cAcc.z = 0;
for(int j = e; j > s; j--){
float k = 1.0f - aOld;
A = samplingTable[j];
aNew = 1.0f - k*(1.0f - A);
cAcc.x += k*colorTable[j].x*A;
cAcc.y += k*colorTable[j].y*A;
cAcc.z += k*colorTable[j].z*A;
if(aNew > 0.99f)
break;
aOld = aNew;
}
index = e*256 + s;
pit[index].x = cAcc.x;
pit[index].y = cAcc.y;
pit[index].z = cAcc.z;
pit[index].w = aNew;
}
}
GPUrender::GPUrender(){
PerspectiveView = false;
eye[0] = eye[1] = eye[2] = 0;
float sqr = 1/sqrtf(3);
L[0] = L[1] = L[2] = sqr;
up[0] = up[1] = 0;
up[2] = -1;
validDir[0] = validDir[1] = validDir[2] = 0;
zoom = 1.0f;
resolution = 256;
pbo = 0;
}
void GPUrender::InitColor(){
float* gAlphaTable;
int* gTransparentTable;
int* gSAT;
float3* gColorTable;
bool* gEmptyBlock;
float time;
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipMalloc((void**)&gSAT, sizeof(int)*257);
hipMalloc((void**)&gAlphaTable, sizeof(float)*256);
hipMalloc((void**)&gTransparentTable, sizeof(int)*256);
hipMalloc((void**)&gColorTable, sizeof(float3)*256);
hipMemcpy(gAlphaTable, alphaTable, sizeof(float)*256, hipMemcpyHostToDevice);
hipMemcpy(gColorTable, colorTable, sizeof(float3)*256, hipMemcpyHostToDevice);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ChangeAlpha), dim3(1), dim3(256), 0, 0, gAlphaTable, gTransparentTable, gSAT);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
printf("InitAlpha time = %fms\n", time);
hipFree(gTransparentTable);
hipMalloc((void**)&gEmptyBlock, sizeof(bool)*blockSize[0]*blockSize[1]*blockSize[2]);
hipMemset(gEmptyBlock, 0, blockSize[0]*blockSize[1]*blockSize[2]*sizeof(bool));
hipEventRecord(start, 0);
hipLaunchKernelGGL(( InitEmptyBlock), dim3(58), dim3(512), 0, 0, gEmptyBlock, gEmptyBlockMax, gEmptyBlockMin, gSAT);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
printf("InitEmptyBlock time = %fms\n", time);
// .
//hipMemset(emptyBlock, 0, blockSize[0]*blockSize[1]*blockSize[2]*sizeof(bool));
hipMemcpyToSymbol(emptyBlock, gEmptyBlock, sizeof(bool)*blockSize[0]*blockSize[1]*blockSize[2], 0, hipMemcpyDeviceToDevice);
hipFree(gEmptyBlock);
hipFree(gSAT);
hipMalloc((void**)&gPIT, sizeof(float4)*256*256);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( InitPreIntegration), dim3(1), dim3(256), 0, 0, gPIT, gAlphaTable, gColorTable);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
printf("InitPreintegration time = %fms\n", time);
hipEventDestroy(start);
hipEventDestroy(end);
hipFree(gAlphaTable);
hipFree(gColorTable);
}
void GPUrender::InitGpuConst(){
vec_sub(at, eye, dir); //dir
s_product(dir, 1.0f/vec_lenth(dir), dir); //dir
cross_product(up, dir, cross);//cross
s_product(cross, (256.0f/resolution)*zoom/vec_lenth(cross), cross);//cross <- x
//s_product(cross, zoom, cross);//
cross_product(dir, cross, u);//u
s_product(u, (256.0f/resolution)*zoom/vec_lenth(u), u);//u <- y
//s_product(u, zoom, u);//
if(dir[0] != 0)//x 0
validDir[0] = 1;
if(dir[1] != 0)//y 0
validDir[1] = 1;
if(dir[2] != 0)//z 0
validDir[2] = 1;
//gpu
int const_size = sizeof(float)*3;//
hipMemcpyToSymbol(gEye, eye, const_size);
hipMemcpyToSymbol(gDir, dir, const_size);
hipMemcpyToSymbol(gCross, cross, const_size);
hipMemcpyToSymbol(gU, u, const_size);
hipMemcpyToSymbol(gL, L, const_size);
hipMemcpyToSymbol(gValidDir, validDir, sizeof(int)*3);
hipMemcpyToSymbol(gResolution, &resolution, sizeof(int)*1);
}
void GPUrender::InitPixelBuffer(){
glGenBuffers(1, &pbo);// .
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// (GLenum target,GLuint buffer)
// API https://www.EyeBody.org/sdk/docs/man/html/glBindBuffer.xhtml
//GL_PIXEL_UNPACK_BUFFER Texture data source
glBufferData(GL_PIXEL_UNPACK_BUFFER,
3*resolution*resolution*sizeof(GLubyte),
0,
GL_STREAM_DRAW);
// ( )
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
hipGraphicsGLRegisterBuffer(&cuda_pbo_resource,
pbo,
hipGraphicsMapFlagsNone);
}
void GPUrender::Rendering() {
unsigned char* gTex;
//hipError_t result;//
hipEvent_t start, end;
float time;
//EyeBody
hipGraphicsMapResources(1, &cuda_pbo_resource, 0);//1
hipGraphicsResourceGetMappedPointer((void **)&gTex, NULL, cuda_pbo_resource);// .()
//x = 8*32 = 256, y = 16*16 = 256 => Texture
//dim3 Dg(8, 16, 1);
//dim3 Db(32, 16, 1);//32*16 = 512
int block = resolution*resolution/512;
//
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
//
if(PerspectiveView)
hipLaunchKernelGGL(( G_Perspective), dim3(block), dim3(512), 0, 0, gTex, gPIT);
else
hipLaunchKernelGGL(( G_Parallel), dim3(block), dim3(512), 0, 0, gTex, gPIT);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
printf("Renter time = %fms\n", time);
//
// EyeBody
hipGraphicsUnmapResources(1, &cuda_pbo_resource, 0);
}
void GPUrender::DrawTexture(){
glClear(GL_COLOR_BUFFER_BIT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, 3, resolution, resolution, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex2f(-1.0, -1.0);
glTexCoord2f(0.0, 1.0); glVertex2f(-1.0, 1.0);
glTexCoord2f(1.0, 1.0); glVertex2f(1.0, 1.0);
glTexCoord2f(1.0, 0.0); glVertex2f(1.0, -1.0);
glEnd();
}
void GPUrender::MouseRotateEye(int x, int y){
/* 1. eye at (A) .
2. eye cross up x,y
3. eye at A .
*/
//1
float A = sqrtf((eye[0]-at[0])*(eye[0]-at[0]) + (eye[1]-at[1])*(eye[1]-at[1]) + (eye[2]-at[2])*(eye[2]-at[2]));
//2
eye[0] += -x*cross[0] + y*u[0];
eye[1] += -x*cross[1] + y*u[1];
eye[2] += -x*cross[2] + y*u[2];
vec_sub(at, eye, dir); //dir
s_product(dir, 1.0f/vec_lenth(dir), dir); //dir
//3
s_product(dir, A, dir);
vec_sub(at, dir, eye);
InitGpuConst();
}
void GPUrender::ForwardEye(bool forward){
if(forward){
if(PerspectiveView){
float buf[3];
s_product(dir, 8, buf);
vec_add(eye, buf, eye);
}
else
zoom /= 1.1f;
}
else{
if(PerspectiveView){
float buf[3];
s_product(dir, 8, buf);
vec_sub(eye, buf, eye);
}
else
zoom *= 1.1f;
}
printf("eye (%.3f, %.3f, %.3f)\n", eye[0], eye[1], eye[2]);
printf("dir (%.3f, %.3f, %.3f)\n", dir[0], dir[1], dir[2]);
InitGpuConst();
}
void GPUrender::ChangeResolution(int n){
resolution = n;
InitPixelBuffer();
InitGpuConst();
}
void GPUrender::ChangeView(bool perspective){
PerspectiveView = perspective;
}
void GPUrender::InitVolume(unsigned char* Volume, int size[3]){
volume = Volume;
volumeSize[0] = size[0];
volumeSize[1] = size[1];
volumeSize[2] = size[2];
at[0] = size[0]/2;
at[1] = size[1]/2;
at[2] = size[2]/2;
glewInit();
hipMemcpyToSymbol(gVolumeSize, volumeSize, sizeof(int)*3);
blockSize[0] = volumeSize[0]/8;
blockSize[1] = volumeSize[1]/8;
blockSize[2] = volumeSize[2]/8;
if(volumeSize[0]%8)
blockSize[0]+=1;
if(volumeSize[1]%8)
blockSize[1]+=1;
if(volumeSize[2]%8)
blockSize[2]+=1;
printf("%d %d %d\n", blockSize[0], blockSize[1], blockSize[2]);
float fBlockSize[3] = {(float)blockSize[0], (float)blockSize[1], (float)blockSize[2]};
hipMemcpyToSymbol(gBlockSize, fBlockSize, sizeof(float)*3);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<unsigned char>();
hipExtent eVolumeSize = make_hipExtent(volumeSize[0], volumeSize[1], volumeSize[2]);
hipMalloc3DArray(&hipArray, &channelDesc, eVolumeSize, 0);
hipMemcpy3DParms params = {0};
params.extent = eVolumeSize;
params.dstArray = hipArray;
params.kind = hipMemcpyHostToDevice;
params.srcPtr = make_hipPitchedPtr((void*)volume, sizeof(unsigned char)*volumeSize[0], volumeSize[0], volumeSize[1]);
hipMemcpy3D(¶ms);
texPtr.filterMode=hipFilterModeLinear;//linear texture float
texPtr.addressMode[0]=hipAddressModeWrap;
texPtr.addressMode[1]=hipAddressModeWrap;
texPtr.addressMode[2]=hipAddressModeWrap;
hipBindTextureToArray(texPtr, hipArray, channelDesc);
float time;
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipMalloc((void**)&gEmptyBlockMax, sizeof(unsigned char)*blockSize[0]*blockSize[1]*blockSize[2]);
hipMalloc((void**)&gEmptyBlockMin, sizeof(unsigned char)*blockSize[0]*blockSize[1]*blockSize[2]);
int block = blockSize[0]*blockSize[1]*blockSize[2]/512;
if((blockSize[0]*blockSize[1]*blockSize[2])%512)
block++;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( InitMinMaxEmptyBlock), dim3(block), dim3(512), 0, 0, gEmptyBlockMax, gEmptyBlockMin);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
printf("InitMinMaxEmptyBlock time = %fms\n", time);
}
void GPUrender::InitColorTable(float3* ColorTable){
colorTable = ColorTable;
}
void GPUrender::InitAlphaTable(float* AlphaTable){
alphaTable = AlphaTable;
}
void GPUrender::EyeBodyCancel(){
if (pbo) {
hipGraphicsUnregisterResource(cuda_pbo_resource);//
glDeleteBuffers(1, &pbo);
}
hipUnbindTexture(texPtr);
hipFreeArray(hipArray);
} | e7e5849c14e4f39c8eb5bee611aa2dcf4c25ec39.cu | #include "GPUrender.cuh"
//프로젝트 20, 코드파일 12
//gpu 상수메모리
__constant__ int gResolution;
__constant__ int gVolumeSize[3];
__constant__ float gBlockSize[3];//volume을 8칸씩 나눈 x, y, z 개수
__constant__ float gEye[3];
__constant__ float gDir[3];
__constant__ float gCross[3];
__constant__ float gU[3];
__constant__ float gL[3];
__constant__ int gValidDir[3];
__constant__ bool emptyBlock[32*32*29];
//__constant__ bool emptyBlock[*40*40]; //볼륨이 커지면 40으로 크게 잡자?
cudaArray* cudaArray = {0};
GLuint pbo = 0; // EyeBody pixel buffer object
struct cudaGraphicsResource *cuda_pbo_resource;
//gpu 텍스쳐메모리
texture<unsigned char, 3, cudaReadModeNormalizedFloat> texPtr;
__inline__ __host__ __device__ void vec_add(float a[3], float b[3], float c[3]){
c[0] = a[0] + b[0];
c[1] = a[1] + b[1];
c[2] = a[2] + b[2];
}
__inline__ __host__ __device__ void vec_sub(float a[3], float b[3], float c[3]){
c[0] = a[0] - b[0];
c[1] = a[1] - b[1];
c[2] = a[2] - b[2];
}
__inline__ __host__ __device__ void s_product(float a[3], float size, float b[3]){
b[0] = a[0] * size;
b[1] = a[1] * size;
b[2] = a[2] * size;
}
__inline__ __host__ __device__ void cross_product(float a[3], float b[3], float c[3])
{
c[0] = a[1]*b[2] - a[2]*b[1];
c[1] = a[2]*b[0] - a[0]*b[2];
c[2] = a[0]*b[1] - a[1]*b[0];
}
__inline__ __host__ __device__ float vec_lenth(float a[3]){
return sqrtf(a[0]*a[0] + a[1]*a[1] + a[2]*a[2]);
}
__inline__ __host__ __device__ float inner_product(float a[3], float b[3]){
float buf = a[0]*b[0];
buf += a[1]*b[1];
buf += a[2]*b[2];
return buf;
}
__inline__ __device__ void getNormal(float pos[3], float N[3]){
N[0] = (tex3D(texPtr, pos[0]+1, pos[1], pos[2]) - tex3D(texPtr, pos[0]-1, pos[1], pos[2]))/2.0f;
N[1] = (tex3D(texPtr, pos[0], pos[1]+1, pos[2]) - tex3D(texPtr, pos[0], pos[1]-1, pos[2]))/2.0f;
N[2] = (tex3D(texPtr, pos[0], pos[1], pos[2]+1) - tex3D(texPtr, pos[0], pos[1], pos[2]-1))/2.0f;
float len = vec_lenth(N);
if(len != 0)//0으로 나눠지는걸 주의
s_product(N, 1/vec_lenth(N), N); //단위벡터로 만들기
}
__inline__ __device__ float sign(float a){
if(a > 0)
return 1.0f;
if(a < 0)
return -1.0f;
return 0.0f;
}
//parallel 함수
__inline__ __device__ bool IsIntersectRayBox1(float& startT, float& endT, float pos[3], int tx, int ty){
float buf[3];
float start[3];
float dx[3], dy[3];
float delta[3];
float Max[3], Min[3];//x, y, z의 최대, 최소
int j = 0;
s_product(gCross, tx-gResolution*0.5f, dx);//x계산
s_product(gU, ty-gResolution*0.5f, dy);//y계산
vec_add(dx, dy, delta);//x+y = point
vec_add(gEye, delta, start);//start+eye = start <- 시작지점
for(int i = 0; i < 3; i++){
float a, b;
if(gValidDir[i] == 1){
a = (gVolumeSize[i]-1 - start[i])/ gDir[i];
b = (0.0f - start[i])/ gDir[i];
if(a > b){//크기 정리
Max[j] = a;
Min[j] = b;
}
else{
Max[j] = b;
Min[j] = a;
}
j++;
}
}
startT = Min[0];
for(int i = 1; i < j; i++){//Min중 Max를 찾자. = startT가 된다.
if(startT < Min[i])
startT = Min[i];
}
startT += 0.001f;
endT = Max[0];
for(int i = 1; i < j; i++){//Max중 Min을 찾자. - endT가 된다.
if(endT > Max[i])
endT = Max[i];
}
endT -= 0.001f;
//광선과 데이터가 만나는 점을 찾아보자.
s_product(gDir, startT, buf);
vec_add(start, buf, pos);
//광선과 박스의 교점을 찾을수 없으면
float maxBox[3] = {gVolumeSize[0]-1, gVolumeSize[1]-1, gVolumeSize[2]-1};
float minBox[3] = {0.0f, 0.0f, 0.0f};
float result1[3];
float result2[3];
for(int i = 0; i < 3; i++){
result1[i] = sign(minBox[i]- pos[i]);
result2[i] = sign(pos[i] - maxBox[i]);
}
float k = inner_product(result1, result2);
if(k == 3.0f)
return true;
return false;
}
__inline__ __device__ int EmptySpaceLeap1(float pos[3]){
//현재 박스가 비어있음을 확인하면 다음박스로 도약한다.
int dt = 0;
float currentBox[3] = {floorf(pos[0]*0.125f), floorf(pos[1]*0.125f), floorf(pos[2]*0.125f)};
float currentBoxId = currentBox[0]+ currentBox[1]*gBlockSize[0] + currentBox[2]*gBlockSize[0]*gBlockSize[1];
if(emptyBlock[(int)currentBoxId]){
while(true){
dt++;
vec_add(pos, gDir, pos);
float forwardBox[3] = {floorf(pos[0]*0.125f), floorf(pos[1]*0.125f), floorf(pos[2]*0.125f)};
float forwardBoxId = forwardBox[0]+ forwardBox[1]*gBlockSize[0] + forwardBox[2]*gBlockSize[0]*gBlockSize[1];
//새로운 박스에 도달하면 빈공간도약검사를 끝낸다.
if(currentBoxId != forwardBoxId)
return dt;
}
}
//비어있지않으면 알파블렌딩
return dt;
}
// Composites one sample at `pos` into the accumulated colour using
// pre-integrated classification and a simple Phong-like lighting term.
// PIT  : 256x256 pre-integration table (.xyz premultiplied colour, .w alpha)
// cAcc : accumulated colour, updated in place
// aOld : alpha accumulated so far
// Returns the new accumulated alpha.
__inline__ __device__ float AlphaBlending1(float4* PIT, float pos[3], float3& cAcc, const float aOld){
    // Scalar values at the current sample and one step ahead (front/back of slab).
    unsigned char nowData = (unsigned char)(tex3D(texPtr, pos[0], pos[1], pos[2])*255.0f);
    unsigned char nextData = (unsigned char)(tex3D(texPtr, pos[0]+gDir[0], pos[1]+gDir[1], pos[2]+gDir[2])*255.0f);
    float N[3]; // gradient-based surface normal at the sample
    getNormal(pos, N);
    float NL = fabsf(inner_product(N, gL));                // diffuse term |N.L|
    // BUGFIX: pow(float, int) promoted to double-precision pow in device
    // code; powf keeps the computation in single precision.
    float NH = fabsf(powf(inner_product(N, gDir), 16.0f)); // specular-like term
    float light = 0.2f + 0.7f*NL + 0.1f*NH; // ambient + diffuse + specular
    if(light > 1.0f)
        light = 1.0f;
    int index = nowData*256 + nextData; // pre-integration lookup (front, back)
    light *= 1.0f-aOld; // front-to-back compositing weight
    float alpha = PIT[index].w;
    cAcc.x += PIT[index].x*light;
    cAcc.y += PIT[index].y*light;
    cAcc.z += PIT[index].z*light;
    return 1.0f-(1.0f-aOld) * (1.0f-alpha); // updated accumulated alpha
}
// Front-to-back ray marching along the global (parallel) view direction gDir.
// PIT         : 256x256 pre-integration lookup table
// start       : entry point of the ray into the volume
// startT/endT : parametric extent of the ray inside the volume
// Returns the accumulated colour for this ray.
__inline__ __device__ float3 RayTracing1(float4* PIT, float start[3], const float startT, const float endT){
float pos[3] = {start[0], start[1], start[2]};
float aNew = 0.0f;
float aOld = 0.0f;
float3 cAcc = {0};
for(float t = startT; t <= endT; t+=1.0f){
int dt = EmptySpaceLeap1(pos);
//dt = number of samples that were skipped (pos already advanced by dt)
if(dt){//the block was empty
t+=dt-1.0f;
continue;
}
aNew = AlphaBlending1(PIT, pos, cAcc, aOld);
//Early Ray Termination: nearly opaque, stop marching
if(aNew > 0.99f)
break;
aOld = aNew;
vec_add(pos, gDir, pos);
}
return cAcc;
}
// Renders one pixel per thread with a parallel (orthographic) camera.
// tex : output RGB8 framebuffer (gResolution x gResolution, 3 bytes/pixel)
// PIT : pre-integration table used during compositing
__global__ void G_Parallel(unsigned char* tex, float4* PIT){
//const int tx = blockDim.x*blockIdx.x + threadIdx.x;//image x coordinate
//const int ty = blockDim.y*blockIdx.y + threadIdx.y;//image y coordinate
//const int locTexture = ty*256 + tx;//flattened 1D coordinate
const int locTexture = blockDim.x*blockIdx.x + threadIdx.x;
const int ty = locTexture/gResolution;
const int tx = locTexture%gResolution;
float pos[3];
float startT, endT;
//IsIntersectRayBox1 validates the ray and computes its [startT, endT] range.
if(!IsIntersectRayBox1(startT, endT, pos, tx, ty)){
tex[locTexture*3] = 0;
tex[locTexture*3 + 1] = 0;
tex[locTexture*3 + 2] = 0;
return;//ray misses the volume: write black and quit this thread
}
float3 cAcc = RayTracing1(PIT, pos, startT, endT);
tex[locTexture*3] = (unsigned char)(cAcc.x*255.0f);
tex[locTexture*3 + 1] = (unsigned char)(cAcc.y*255.0f);
tex[locTexture*3 + 2] = (unsigned char)(cAcc.z*255.0f);
}
//end of the parallel-projection functions
//perspective-projection functions
// Perspective-ray setup: builds this pixel's ray (origin on the image plane,
// per-pixel direction `dir`) and intersects it with the volume's bounding box
// using the slab method over the axes flagged in gValidDir.
// Outputs: startT/endT = parametric entry/exit, pos = first sample point,
// dir = normalised per-pixel ray direction.
// Returns false when the ray misses the box.
__inline__ __device__ bool IsIntersectRayBox2(float& startT, float& endT, float pos[3], float dir[3], int tx, int ty){
float buf[3];
float f[3];
float cameraCenter[3];
float start[3];
float Max[3], Min[3];//per-axis slab max / min parameters
float dx[3], dy[3];
float delta[3];
int j = 0;
s_product(gCross, tx-gResolution*0.5f, dx);//screen-space x offset
s_product(dx, 0.005f, dx);
s_product(gU, ty-gResolution*0.5f, dy);//screen-space y offset
s_product(dy, 0.005f, dy);
vec_add(dx, dy, delta);//dx+dy = delta
s_product(gDir, 1.0f, f);
vec_add(gEye, f, cameraCenter);//image plane centre, one unit in front of the eye
vec_add(cameraCenter, delta, start);//this pixel's position on the image plane
vec_sub(start, gEye, dir);//each pixel gets its own ray direction
s_product(dir, 1/vec_lenth(dir), dir);
for(int i = 0; i < 3; i++){
float a, b;
if(gValidDir[i] == 1){
a = (gVolumeSize[i]-1 - start[i])/ dir[i];
b = (0.0f - start[i])/ dir[i];
if(a > b){//order the pair
Max[j] = a;
Min[j] = b;
}
else{
Max[j] = b;
Min[j] = a;
}
j++;
}
}
endT = Max[0];
for(int i = 1; i < j; i++){//endT = min of the per-axis exit parameters
if(endT > Max[i])
endT = Max[i];
}
endT -= 0.001f;
//volume extent
float maxBox[3] = {gVolumeSize[0], gVolumeSize[1], gVolumeSize[2]};
float minBox[3] = {0.0f, 0.0f, 0.0f};
float result1[3];
float result2[3];
float k;
for(int i = 0; i < 3; i++){
result1[i] = sign(minBox[i]- start[i]);
result2[i] = sign(start[i] - maxBox[i]);
}
k = inner_product(result1, result2);
//If the image-plane point is already inside the volume there is no need
//to intersect for an entry point.
if(k == 3.0f){//inside the box
pos[0] = start[0];
pos[1] = start[1];
pos[2] = start[2];
startT = 0.0f;
return true;
}
//Outside the volume: the entry intersection must be found as well.
startT = Min[0];
for(int i = 1; i < j; i++){//startT = max of the per-axis entry parameters
if(startT < Min[i])
startT = Min[i];
}
startT += 0.001f;
//Step from the image plane to the entry point of the box.
s_product(dir, startT, buf);
vec_add(start, buf, pos);
for(int i = 0; i < 3; i++){
result1[i] = sign(minBox[i]- pos[i]);
result2[i] = sign(pos[i] - maxBox[i]);
}
k = inner_product(result1, result2);
if(k == 3.0f)//entry point lies inside the box
return true;
return false;
}
// Perspective-path empty-space skip: identical to EmptySpaceLeap1 but walks
// along the per-pixel ray direction `dir` instead of the global gDir.
// Returns the number of unit steps skipped (0 if the block is occupied);
// `pos` is advanced in place.
__inline__ __device__ int EmptySpaceLeap2(float pos[3], float dir[3]){
    int dt = 0;
    // Block (8^3 cell) containing the current sample.
    float currentBox[3] = {floorf(pos[0]*0.125f), floorf(pos[1]*0.125f), floorf(pos[2]*0.125f)};
    float currentBoxId = currentBox[0]+ currentBox[1]*gBlockSize[0] + currentBox[2]*gBlockSize[0]*gBlockSize[1];
    if(emptyBlock[(int)currentBoxId]){
        while(true){
            dt++;
            vec_add(pos, dir, pos);
            float forwardBox[3] = {floorf(pos[0]*0.125f), floorf(pos[1]*0.125f), floorf(pos[2]*0.125f)};
            // BUGFIX: the z stride is gBlockSize[0]*gBlockSize[1] (x*y block
            // counts), not gBlockSize[0]*gBlockSize[2]; the wrong stride made
            // forwardBoxId inconsistent with currentBoxId above (and with
            // EmptySpaceLeap1), breaking the leap for non-cubic block grids.
            float forwardBoxId = forwardBox[0]+ forwardBox[1]*gBlockSize[0] + forwardBox[2]*gBlockSize[0]*gBlockSize[1];
            // A new block has been reached: stop skipping.
            if(currentBoxId != forwardBoxId)
                break;
        }
    }
    // Block not empty: caller proceeds with alpha blending.
    return dt;
}
// Perspective-path compositing: same as AlphaBlending1 but samples the next
// scalar value one step along the per-pixel ray `dir`.
// Returns the updated accumulated alpha; cAcc is updated in place.
__inline__ __device__ float AlphaBlending2(float4* PIT, float pos[3], float dir[3], float3& cAcc, const float aOld){
    unsigned char nowData = (unsigned char)(tex3D(texPtr, pos[0], pos[1], pos[2])*255.0f);
    unsigned char nextData = (unsigned char)(tex3D(texPtr, pos[0]+dir[0], pos[1]+dir[1], pos[2]+dir[2])*255.0f);
    float N[3]; // gradient-based surface normal at the sample
    getNormal(pos, N);
    float NL = fabsf(inner_product(N, gL));                // diffuse term |N.L|
    // BUGFIX: pow(float, int) promoted to double precision; powf keeps it
    // single precision. NOTE(review): this still uses the global gDir rather
    // than this pixel's `dir` -- mirrors AlphaBlending1, but confirm intended.
    float NH = fabsf(powf(inner_product(N, gDir), 16.0f));
    float light = 0.2f + 0.7f*NL + 0.1f*NH; // ambient + diffuse + specular
    if(light > 1.0f)
        light = 1.0f;
    int index = nowData*256 + nextData; // pre-integration lookup (front, back)
    light *= 1.0f-aOld; // front-to-back compositing weight
    float alpha = PIT[index].w;
    cAcc.x += PIT[index].x*light;
    cAcc.y += PIT[index].y*light;
    cAcc.z += PIT[index].z*light;
    return 1.0f-(1.0f-aOld) * (1.0f-alpha); // updated accumulated alpha
}
// Front-to-back ray marching for the perspective camera. Marches from startT
// to endT in unit steps along `dir`, skipping empty blocks and compositing
// through the pre-integration table PIT. Returns the accumulated colour.
__inline__ __device__ float3 RayTracing2(float4* PIT, float start[3], float dir[3], const float startT, const float endT){
    float sample[3] = {start[0], start[1], start[2]};
    float3 colour = {0};
    float alphaAcc = 0.0f;
    float t = startT;
    while(t <= endT){
        int skipped = EmptySpaceLeap2(sample, dir);
        if(skipped == 0){
            float alphaNext = AlphaBlending2(PIT, sample, dir, colour, alphaAcc);
            // Early ray termination: nearly opaque, nothing more to add.
            if(alphaNext > 0.99f)
                break;
            alphaAcc = alphaNext;
            vec_add(sample, dir, sample);
            t += 1.0f;
        }else{
            // EmptySpaceLeap2 already advanced `sample` by `skipped` steps.
            t += (float)skipped;
        }
    }
    return colour;
}
// Renders one pixel per thread with a perspective camera. Each thread derives
// its own ray direction inside IsIntersectRayBox2.
// tex : output RGB8 framebuffer (gResolution x gResolution, 3 bytes/pixel)
// PIT : pre-integration table used during compositing
__global__ void G_Perspective(unsigned char* tex, float4* PIT){
const int locTexture = blockDim.x*blockIdx.x + threadIdx.x;
const int ty = locTexture/gResolution;
const int tx = locTexture%gResolution;
float pos[3];
float dir[3];
float startT, endT;
//IsIntersectRayBox2 validates the ray and computes its [startT, endT] range.
if(!IsIntersectRayBox2(startT, endT, pos, dir, tx, ty)){
tex[locTexture*3] = 0;
tex[locTexture*3 + 1] = 0;
tex[locTexture*3 + 2] = 0;
return;//ray misses the volume: write black and quit this thread
}
float3 cAcc = RayTracing2(PIT, pos, dir, startT, endT);
tex[locTexture*3] = (unsigned char)(cAcc.x*255.0f);
tex[locTexture*3 + 1] = (unsigned char)(cAcc.y*255.0f);
tex[locTexture*3 + 2] = (unsigned char)(cAcc.z*255.0f);
}
//end of the perspective-projection functions
// Builds, from the 256-entry alpha transfer table, a prefix-sum table aSAT of
// "bin is non-transparent" flags: aSAT[k] == number of bins j < k with
// alphaTable[j] != 0 (aSAT has 257 entries, aSAT[0] == 0).
// Must be launched as a single block of exactly 256 threads.
__global__ void ChangeAlpha(float* alphaTable, int* transparentTable, int* aSAT){
    int i = threadIdx.x;
    // Flag: 1 if this alpha bin contributes anything visible.
    transparentTable[i] = (alphaTable[i] == 0) ? 0 : 1;
    // BUGFIX: aSAT comes straight from cudaMalloc (never memset by the host
    // in InitColor) and may hold garbage; clear it before accumulating into
    // it with atomicAdd. Harmless if it was already zero.
    aSAT[i+1] = 0;
    if(i == 0)
        aSAT[0] = 0;
    __syncthreads();
    // Thread i adds its flag to every prefix bucket that includes bin i.
    for(int j = i; j < 256; j++)
        atomicAdd(&aSAT[j+1], transparentTable[i]);
}
// One thread per 8^3 block: scans the block's voxels (inclusive 9^3 footprint
// overlapping one voxel into the neighbours) and records the min/max scalar
// value, quantised to 8 bits, for later empty-block classification.
__global__ void InitMinMaxEmptyBlock(unsigned char* emptyBlockMax, unsigned char* emptyBlockMin){
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    int sx = (int)gBlockSize[0];
    int sy = (int)gBlockSize[1];
    int sz = (int)gBlockSize[2];
    // The host rounds the thread count up to a multiple of 512; surplus
    // threads must not write past the end of the min/max arrays.
    if(i >= sx*sy*sz)
        return;
    int bz = i/(sx*sy);
    int by = (i%(sx*sy))/sx;
    // BUGFIX: the x index is i modulo the x block count (was gBlockSize[1],
    // which decodes the wrong block whenever the grid is not square in x/y).
    int bx = i%sx;
    float m = 1.0f;
    float M = 0.0f;
    for(int vz = bz*8; vz <= bz*8 + 8; vz++){
        if(vz > gVolumeSize[2]-1)
            break;
        for(int vy = by*8; vy <= by*8 + 8; vy++){
            // Clamp y and x to the volume as well, consistent with the z axis
            // (previously edge blocks sampled wrapped texture coordinates).
            if(vy > gVolumeSize[1]-1)
                break;
            for(int vx = bx*8; vx <= bx*8 + 8; vx++){
                if(vx > gVolumeSize[0]-1)
                    break;
                float data = tex3D(texPtr, vx, vy, vz);
                M = max(M, data);
                m = min(m, data);
            }
        }
    }
    emptyBlockMin[i] = (unsigned char)(m*255);
    emptyBlockMax[i] = (unsigned char)(M*255);
}
// Classifies each 8^3 block as empty. pSAT (aSAT shifted by one) is the
// prefix-sum of "bin is non-transparent" flags over the 256 alpha bins, so a
// block is empty exactly when no non-transparent bin lies in [min, max].
__global__ void InitEmptyBlock(bool* emptyBlock, unsigned char* emptyBlockMax, unsigned char* emptyBlockMin, int* aSAT){
int i = blockDim.x*blockIdx.x + threadIdx.x;
int *pSAT = &(aSAT[1]);
// NOTE(review): when emptyBlockMin[i] == 0 this reads pSAT[-1] == aSAT[0],
// which ChangeAlpha sets to 0 -- an intentional sentinel, not an overrun.
// NOTE(review): no bounds guard on i, so the host launch must match the
// array length exactly (currently a hard-coded <<<58, 512>>> in InitColor);
// verify for other volume sizes.
emptyBlock[i] = (pSAT[emptyBlockMax[i]] == pSAT[emptyBlockMin[i]-1]) ? true : false;
}
// Builds the 256x256 pre-integration table `pit` from the colour/alpha
// transfer tables. Entry (front, back) stores the colour (.xyz, already
// alpha-weighted) and opacity (.w) accumulated along a ray segment whose
// endpoint scalar values are `front` and `back`. Launched as one block of
// 256 threads; thread i fills the two diagonals at distance 255-i from the
// main diagonal, thread 255 fills the main diagonal itself.
__global__ void InitPreIntegration(float4* pit, float* alphaTable, float3* colorTable){
int i = threadIdx.x;
if(i == 255){
//Main diagonal: front value == back value, no sub-sampling needed.
for(int j = 0; j < 256; j++){
float A = alphaTable[j];
int index = j*256+j;
pit[index].w = A;
pit[index].x = colorTable[j].x * A;
pit[index].y = colorTable[j].y * A;
pit[index].z = colorTable[j].z * A;
}
return;
}
int k = 255 - i;
float samplingTable[256];
for(int j = 0; j < 256; j++){
float A = alphaTable[j];//opacity of bin j, corrected for a segment of k sub-steps
A = 1.0f - pow(1.0f - A, 1.0f/k);
samplingTable[j] = A;
}
int e = 255-i;
int s = 0;
for( ; s <= i; s++,e++){
float A = 0, aOld = 0, aNew = 0;
float3 cAcc = {0};
//Upper-left of the / diagonal: front value s, back value e (s < e).
for(int j = s; j < e; j++){
float k = 1.0f - aOld;//remaining transparency
A = samplingTable[j];
aNew = 1.0f - k*(1.0f - A);
cAcc.x += k*colorTable[j].x*A;
cAcc.y += k*colorTable[j].y*A;
cAcc.z += k*colorTable[j].z*A;
if(aNew > 0.99f)
break;
aOld = aNew;
}
int index = s*256 + e;
pit[index].x = cAcc.x;
pit[index].y = cAcc.y;
pit[index].z = cAcc.z;
pit[index].w = aNew;
//Lower-right of the / diagonal: front value e, back value s (walked backwards).
aOld = 0, aNew = 0;
cAcc.x = cAcc.y = cAcc.z = 0;
for(int j = e; j > s; j--){
float k = 1.0f - aOld;
A = samplingTable[j];
aNew = 1.0f - k*(1.0f - A);
cAcc.x += k*colorTable[j].x*A;
cAcc.y += k*colorTable[j].y*A;
cAcc.z += k*colorTable[j].z*A;
if(aNew > 0.99f)
break;
aOld = aNew;
}
index = e*256 + s;
pit[index].x = cAcc.x;
pit[index].y = cAcc.y;
pit[index].z = cAcc.z;
pit[index].w = aNew;
}
}
// Default state: parallel projection, eye at the origin, light direction
// (1,1,1) normalised, up pointing along -z, unit zoom, 256x256 output,
// and no pixel-buffer object allocated yet.
GPUrender::GPUrender(){
    PerspectiveView = false;
    for(int i = 0; i < 3; ++i){
        eye[i] = 0;
        validDir[i] = 0;
        L[i] = 1/sqrtf(3);
    }
    up[0] = 0;
    up[1] = 0;
    up[2] = -1;
    zoom = 1.0f;
    resolution = 256;
    pbo = 0;
}
// Uploads the transfer function, builds the transparency prefix-sum table,
// classifies empty blocks against it, and fills the pre-integration table
// gPIT. Must run after InitVolume (needs gEmptyBlockMax/Min and blockSize).
void GPUrender::InitColor(){
float* gAlphaTable;
int* gTransparentTable;
int* gSAT;
float3* gColorTable;
bool* gEmptyBlock;
float time;
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
// NOTE(review): gSAT is not zeroed before ChangeAlpha accumulates into it
// with atomicAdd -- confirm the kernel clears it, or memset it here.
cudaMalloc((void**)&gSAT, sizeof(int)*257);
cudaMalloc((void**)&gAlphaTable, sizeof(float)*256);
cudaMalloc((void**)&gTransparentTable, sizeof(int)*256);
cudaMalloc((void**)&gColorTable, sizeof(float3)*256);
cudaMemcpy(gAlphaTable, alphaTable, sizeof(float)*256, cudaMemcpyHostToDevice);
cudaMemcpy(gColorTable, colorTable, sizeof(float3)*256, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
ChangeAlpha<<<1, 256>>>(gAlphaTable, gTransparentTable, gSAT);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
printf("InitAlpha time = %fms\n", time);
cudaFree(gTransparentTable);
cudaMalloc((void**)&gEmptyBlock, sizeof(bool)*blockSize[0]*blockSize[1]*blockSize[2]);
cudaMemset(gEmptyBlock, 0, blockSize[0]*blockSize[1]*blockSize[2]*sizeof(bool));
cudaEventRecord(start, 0);
// NOTE(review): the grid is hard-coded to 58x512 threads; verify it matches
// blockSize[0]*blockSize[1]*blockSize[2] for the current volume.
InitEmptyBlock<<<58, 512>>>(gEmptyBlock, gEmptyBlockMax, gEmptyBlockMin, gSAT);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
printf("InitEmptyBlock time = %fms\n", time);
// Publish the empty-block flags to constant memory for the render kernels.
//cudaMemset(emptyBlock, 0, blockSize[0]*blockSize[1]*blockSize[2]*sizeof(bool));
cudaMemcpyToSymbol(emptyBlock, gEmptyBlock, sizeof(bool)*blockSize[0]*blockSize[1]*blockSize[2], 0, cudaMemcpyDeviceToDevice);
cudaFree(gEmptyBlock);
cudaFree(gSAT);
cudaMalloc((void**)&gPIT, sizeof(float4)*256*256);
cudaEventRecord(start, 0);
InitPreIntegration<<<1, 256>>>(gPIT, gAlphaTable, gColorTable);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
printf("InitPreintegration time = %fms\n", time);
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaFree(gAlphaTable);
cudaFree(gColorTable);
}
// Rebuilds the camera basis (dir, cross, u) from eye/at/up, records which
// axes the view direction has a non-zero component on, and uploads all
// camera parameters to device constant memory.
void GPUrender::InitGpuConst(){
vec_sub(at, eye, dir); //view direction = at - eye
s_product(dir, 1.0f/vec_lenth(dir), dir); //normalise dir
cross_product(up, dir, cross);//camera right axis
s_product(cross, (256.0f/resolution)*zoom/vec_lenth(cross), cross);//normalised, scaled by zoom: screen-space x step
//s_product(cross, zoom, cross);//zoom factor
cross_product(dir, cross, u);//camera up axis
s_product(u, (256.0f/resolution)*zoom/vec_lenth(u), u);//normalised, scaled by zoom: screen-space y step
//s_product(u, zoom, u);//zoom factor
if(dir[0] != 0)//x component usable for slab intersection
validDir[0] = 1;
if(dir[1] != 0)//y component usable
validDir[1] = 1;
if(dir[2] != 0)//z component usable
validDir[2] = 1;
// NOTE(review): validDir is never reset to 0 here, so once a component
// becomes non-zero it stays flagged even if the camera later aligns with
// an axis -- confirm intended.
// Upload everything to constant memory.
int const_size = sizeof(float)*3;
cudaMemcpyToSymbol(gEye, eye, const_size);
cudaMemcpyToSymbol(gDir, dir, const_size);
cudaMemcpyToSymbol(gCross, cross, const_size);
cudaMemcpyToSymbol(gU, u, const_size);
cudaMemcpyToSymbol(gL, L, const_size);
cudaMemcpyToSymbol(gValidDir, validDir, sizeof(int)*3);
cudaMemcpyToSymbol(gResolution, &resolution, sizeof(int)*1);
}
// Creates the OpenGL pixel-buffer object used as the render target and
// registers it with CUDA for interop.
// NOTE(review): calling this again (e.g. from ChangeResolution) generates a
// new PBO without unregistering/deleting the previous one -- confirm whether
// the old buffer leaks.
void GPUrender::InitPixelBuffer(){
glGenBuffers(1, &pbo);//create the buffer object
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
//Bind as GL_PIXEL_UNPACK_BUFFER: the buffer serves as a texture data source.
//API reference: https://www.EyeBody.org/sdk/docs/man/html/glBindBuffer.xhtml
glBufferData(GL_PIXEL_UNPACK_BUFFER,
3*resolution*resolution*sizeof(GLubyte),
0,
GL_STREAM_DRAW);
//Allocate storage for the bound buffer (RGB8, resolution^2 pixels).
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
cudaGraphicsGLRegisterBuffer(&cuda_pbo_resource,
pbo,
cudaGraphicsMapFlagsNone);
}
// Renders one frame into the PBO: maps the interop buffer, launches the
// parallel or perspective kernel over resolution^2 pixels (512 threads per
// block), measures kernel time, then unmaps so OpenGL can show the texture.
void GPUrender::Rendering() {
unsigned char* gTex;
//cudaError_t result;//error checking
cudaEvent_t start, end;
float time;
//Map the OpenGL PBO for CUDA access.
cudaGraphicsMapResources(1, &cuda_pbo_resource, 0);//map 1 resource
cudaGraphicsResourceGetMappedPointer((void **)&gTex, NULL, cuda_pbo_resource);//fetch the mapped device pointer
//x count = 8*32 = 256, y count = 16*16 = 256 => texture size
//dim3 Dg(8, 16, 1);
//dim3 Db(32, 16, 1);//32*16 = 512, max threads per block
int block = resolution*resolution/512;
//Timing.
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
//Kernel launch.
if(PerspectiveView)
G_Perspective<<<block, 512>>>(gTex, gPIT);
else
G_Parallel<<<block, 512>>>(gTex, gPIT);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
printf("Renter time = %fms\n", time);
//End of timing.
//Unmapping hands the texture back to OpenGL for display.
cudaGraphicsUnmapResources(1, &cuda_pbo_resource, 0);
}
// Draws the PBO contents as a full-screen textured quad with the fixed
// pipeline. Assumes the PBO is bound as GL_PIXEL_UNPACK_BUFFER so
// glTexImage2D sources its pixels from it (data pointer NULL = PBO offset 0).
void GPUrender::DrawTexture(){
glClear(GL_COLOR_BUFFER_BIT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, 3, resolution, resolution, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex2f(-1.0, -1.0);
glTexCoord2f(0.0, 1.0); glVertex2f(-1.0, 1.0);
glTexCoord2f(1.0, 1.0); glVertex2f(1.0, 1.0);
glTexCoord2f(1.0, 0.0); glVertex2f(1.0, -1.0);
glEnd();
}
// Orbits the eye around the look-at point `at` while keeping the eye-to-at
// distance fixed:
//  1. measure the current distance A between eye and at;
//  2. translate the eye by (-x, y) along the camera's cross/up axes;
//  3. move the eye back onto the sphere of radius A around at.
void GPUrender::MouseRotateEye(int x, int y){
//step 1
float A = sqrtf((eye[0]-at[0])*(eye[0]-at[0]) + (eye[1]-at[1])*(eye[1]-at[1]) + (eye[2]-at[2])*(eye[2]-at[2]));
//step 2
eye[0] += -x*cross[0] + y*u[0];
eye[1] += -x*cross[1] + y*u[1];
eye[2] += -x*cross[2] + y*u[2];
vec_sub(at, eye, dir); //new view direction
s_product(dir, 1.0f/vec_lenth(dir), dir); //normalised
//step 3: eye = at - A*dir
s_product(dir, A, dir);
vec_sub(at, dir, eye);
InitGpuConst();
}
// Moves the camera forward/backward. In perspective mode the eye is
// translated 8 units along the view direction; in parallel mode the zoom
// factor is scaled by 1.1 instead. Uploads refreshed constants afterwards.
void GPUrender::ForwardEye(bool forward){
    if(PerspectiveView){
        float step[3];
        s_product(dir, 8, step);
        if(forward)
            vec_add(eye, step, eye);
        else
            vec_sub(eye, step, eye);
    }
    else{
        if(forward)
            zoom /= 1.1f;
        else
            zoom *= 1.1f;
    }
    printf("eye (%.3f, %.3f, %.3f)\n", eye[0], eye[1], eye[2]);
    printf("dir (%.3f, %.3f, %.3f)\n", dir[0], dir[1], dir[2]);
    InitGpuConst();
}
// Switches the output resolution to n x n pixels, recreating the PBO and
// re-uploading the camera constants.
// NOTE(review): the previous PBO is not released before InitPixelBuffer
// creates a new one -- confirm intended.
void GPUrender::ChangeResolution(int n){
resolution = n;
InitPixelBuffer();
InitGpuConst();
}
// Selects the perspective (true) or parallel (false) render path.
void GPUrender::ChangeView(bool perspective){
PerspectiveView = perspective;
}
// Uploads the volume to a 3D CUDA array bound to texPtr, derives the 8^3
// block-grid dimensions, and precomputes per-block min/max values for
// empty-space skipping. `Volume` is not copied on the host side; the caller
// keeps ownership.
void GPUrender::InitVolume(unsigned char* Volume, int size[3]){
volume = Volume;
volumeSize[0] = size[0];
volumeSize[1] = size[1];
volumeSize[2] = size[2];
//Look-at point defaults to the volume centre.
at[0] = size[0]/2;
at[1] = size[1]/2;
at[2] = size[2]/2;
glewInit();
cudaMemcpyToSymbol(gVolumeSize, volumeSize, sizeof(int)*3);
//Number of 8-voxel blocks per axis, rounded up.
blockSize[0] = volumeSize[0]/8;
blockSize[1] = volumeSize[1]/8;
blockSize[2] = volumeSize[2]/8;
if(volumeSize[0]%8)
blockSize[0]+=1;
if(volumeSize[1]%8)
blockSize[1]+=1;
if(volumeSize[2]%8)
blockSize[2]+=1;
printf("%d %d %d\n", blockSize[0], blockSize[1], blockSize[2]);
float fBlockSize[3] = {(float)blockSize[0], (float)blockSize[1], (float)blockSize[2]};
cudaMemcpyToSymbol(gBlockSize, fBlockSize, sizeof(float)*3);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<unsigned char>();
cudaExtent eVolumeSize = make_cudaExtent(volumeSize[0], volumeSize[1], volumeSize[2]);
cudaMalloc3DArray(&cudaArray, &channelDesc, eVolumeSize, 0);
cudaMemcpy3DParms params = {0};
params.extent = eVolumeSize;
params.dstArray = cudaArray;
params.kind = cudaMemcpyHostToDevice;
params.srcPtr = make_cudaPitchedPtr((void*)volume, sizeof(unsigned char)*volumeSize[0], volumeSize[0], volumeSize[1]);
cudaMemcpy3D(&params);
texPtr.filterMode=cudaFilterModeLinear;//linear filtering requires float reads (assumes texPtr is declared with cudaReadModeNormalizedFloat -- TODO confirm)
texPtr.addressMode[0]=cudaAddressModeWrap;
texPtr.addressMode[1]=cudaAddressModeWrap;
texPtr.addressMode[2]=cudaAddressModeWrap;
cudaBindTextureToArray(texPtr, cudaArray, channelDesc);
float time;
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaMalloc((void**)&gEmptyBlockMax, sizeof(unsigned char)*blockSize[0]*blockSize[1]*blockSize[2]);
cudaMalloc((void**)&gEmptyBlockMin, sizeof(unsigned char)*blockSize[0]*blockSize[1]*blockSize[2]);
//Round the launch up so every block gets a thread.
int block = blockSize[0]*blockSize[1]*blockSize[2]/512;
if((blockSize[0]*blockSize[1]*blockSize[2])%512)
block++;
cudaEventRecord(start, 0);
InitMinMaxEmptyBlock<<<block, 512>>>(gEmptyBlockMax, gEmptyBlockMin);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
printf("InitMinMaxEmptyBlock time = %fms\n", time);
}
// Stores a pointer to the 256-entry RGB transfer table (not copied; the
// caller keeps ownership and must keep it alive until InitColor runs).
void GPUrender::InitColorTable(float3* ColorTable){
colorTable = ColorTable;
}
// Stores a pointer to the 256-entry opacity transfer table (not copied).
void GPUrender::InitAlphaTable(float* AlphaTable){
alphaTable = AlphaTable;
}
// Tears down GPU resources: unregisters and deletes the PBO (if any),
// unbinds the volume texture and frees the 3D CUDA array.
void GPUrender::EyeBodyCancel(){
if (pbo) {
cudaGraphicsUnregisterResource(cuda_pbo_resource);//fully detach the interop resource
glDeleteBuffers(1, &pbo);
}
cudaUnbindTexture(texPtr);
cudaFreeArray(cudaArray);
}
2392ed1fb2cbc1b51bafe878a9efbe1a8b273768.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//findMessage.cpp
#include "stdafx.h"
#include "deviceMemoryDef.h"
#include "gpuMD5.h"
/**
 * Brute-force MD5 preimage search on the GPU (HIP build).
 * min         : shortest plaintext length to try
 * max         : longest plaintext length to try
 * searchScope : character set the plaintext is drawn from
 * Returns (found, plaintext). Relies on the globals d_targetDigest,
 * d_powerSymbols and d_powerValues being initialised elsewhere; they are
 * released here once the search finishes.
 */
pair<bool, string> findMessage(size_t min, size_t max, string searchScope) {
    bool isFound = false;
    size_t h_isFound = -1; size_t * d_isFound; // found index, (size_t)-1 == not found
    uchar* d_message; uchar h_message[16];     // recovered plaintext, at most 16 chars
    string message = "";
    // Launch geometry tuned for a GeForce GT650M: 1024 x 1024 threads.
    int nBlocks = 1024;
    int nThreadsPerBlock = 1024;
    size_t nTotalThreads = nBlocks * nThreadsPerBlock; // total threads per launch
    size_t charsetLength = searchScope.length();       // size of the search alphabet
    hipError_t error;
    error = hipMalloc((void**)&d_isFound, sizeof(size_t));
    if (error != hipSuccess){
        printCudaError(error,"", __FILE__, __LINE__);
    }
    error = hipMemcpy(d_isFound, &h_isFound, sizeof(size_t), hipMemcpyHostToDevice);
    if (error != hipSuccess){
        printCudaError(error,"", __FILE__, __LINE__);
    }
    error = hipMalloc((void**)&d_message, 16 * sizeof(uchar));
    if (error != hipSuccess){
        printCudaError(error,"", __FILE__, __LINE__);
    }
    // Per-thread starting offsets into the enumeration space.
    float* h_startNumbers = new float[nTotalThreads];
    float* d_startNumbers;
    error = hipMalloc((void**)&d_startNumbers, nTotalThreads * sizeof(float));
    if (error != hipSuccess){
        printCudaError(error,"", __FILE__, __LINE__);
    }
    for (size_t size = min; size <= max; ++size) {
        cout<<""<<size<<endl;
        float maxValue = pow((float)charsetLength, (float)size);          // candidates of this length
        float nIterations = ceil(maxValue / (nBlocks * nThreadsPerBlock));// candidates per thread
        for (size_t i = 0; i != nTotalThreads; ++i) {
            h_startNumbers[i] = i * nIterations;
        }
        error = hipMemcpy(d_startNumbers, h_startNumbers, nTotalThreads * sizeof(float), hipMemcpyHostToDevice);
        if (error != hipSuccess){
            printCudaError(error," ", __FILE__, __LINE__);
        }
        clock_t start = clock();
        // Launch the search kernel for this plaintext length.
        hipLaunchKernelGGL(( searchMD5), dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, d_startNumbers,
            nIterations, charsetLength, size, d_isFound, d_message);
        hipDeviceSynchronize();
        cout<<""<<(clock()-start)/CLK_TCK<<endl;
        printf("%s\n", hipGetErrorString(hipGetLastError()));
        // BUGFIX: copy the full size_t (was sizeof(int), which left the upper
        // half of h_isFound stale on 64-bit hosts).
        hipMemcpy(&h_isFound, d_isFound, sizeof(size_t), hipMemcpyDeviceToHost);
        // BUGFIX: %zu matches size_t (was %d).
        printf("####################### h_isFound = %zu\n", h_isFound);
        if (h_isFound != (size_t)-1) {
            printf("h_isFound=%zu\n", h_isFound);
            hipMemcpy(h_message, d_message, 16 * sizeof(uchar), hipMemcpyDeviceToHost);
            for (size_t i = 0; i != size; ++i){
                message.push_back(h_message[i]);
            }
            isFound = true;
            cout << message << endl;
            break;
        }
    }
    // Release device and host buffers.
    hipFree(d_targetDigest);
    hipFree(d_powerSymbols);
    hipFree(d_powerValues);
    hipFree(d_isFound);
    hipFree(d_message);
    hipFree(d_startNumbers);
    // BUGFIX: array form of delete for memory obtained with new[].
    delete[] h_startNumbers;
    cout<<"..."<<endl;
    return make_pair(isFound, message);
}
#include "stdafx.h"
#include "deviceMemoryDef.h"
#include "gpuMD5.h"
/**
 * Brute-force MD5 preimage search on the GPU.
 * min         : shortest plaintext length to try
 * max         : longest plaintext length to try
 * searchScope : character set the plaintext is drawn from
 * Returns (found, plaintext). Relies on the globals d_targetDigest,
 * d_powerSymbols and d_powerValues being initialised elsewhere; they are
 * released here once the search finishes.
 */
pair<bool, string> findMessage(size_t min, size_t max, string searchScope) {
    bool isFound = false;
    size_t h_isFound = -1; size_t * d_isFound; // found index, (size_t)-1 == not found
    uchar* d_message; uchar h_message[16];     // recovered plaintext, at most 16 chars
    string message = "";
    // Launch geometry tuned for a GeForce GT650M: 1024 x 1024 threads.
    int nBlocks = 1024;
    int nThreadsPerBlock = 1024;
    size_t nTotalThreads = nBlocks * nThreadsPerBlock; // total threads per launch
    size_t charsetLength = searchScope.length();       // size of the search alphabet
    cudaError_t error;
    error = cudaMalloc((void**)&d_isFound, sizeof(size_t));
    if (error != cudaSuccess){
        printCudaError(error,"分配(搜索结果标识)显存出错", __FILE__, __LINE__);
    }
    error = cudaMemcpy(d_isFound, &h_isFound, sizeof(size_t), cudaMemcpyHostToDevice);
    if (error != cudaSuccess){
        printCudaError(error,"拷贝(搜索结果标识)至显存出错", __FILE__, __LINE__);
    }
    error = cudaMalloc((void**)&d_message, 16 * sizeof(uchar));
    if (error != cudaSuccess){
        printCudaError(error,"分配搜索结果(明文)显存出错", __FILE__, __LINE__);
    }
    // Per-thread starting offsets into the enumeration space.
    float* h_startNumbers = new float[nTotalThreads];
    float* d_startNumbers;
    error = cudaMalloc((void**)&d_startNumbers, nTotalThreads * sizeof(float));
    if (error != cudaSuccess){
        printCudaError(error,"分配线程的搜索起始地址出错", __FILE__, __LINE__);
    }
    for (size_t size = min; size <= max; ++size) {
        cout<<"当前搜索长度:"<<size<<endl;
        float maxValue = pow((float)charsetLength, (float)size);          // candidates of this length
        float nIterations = ceil(maxValue / (nBlocks * nThreadsPerBlock));// candidates per thread
        for (size_t i = 0; i != nTotalThreads; ++i) {
            h_startNumbers[i] = i * nIterations;
        }
        error = cudaMemcpy(d_startNumbers, h_startNumbers, nTotalThreads * sizeof(float), cudaMemcpyHostToDevice);
        if (error != cudaSuccess){
            printCudaError(error,"拷贝 线程的搜索起始地址 到显存出错", __FILE__, __LINE__);
        }
        clock_t start = clock();
        // Launch the search kernel for this plaintext length.
        searchMD5<<< nBlocks, nThreadsPerBlock >>>(d_startNumbers,
            nIterations, charsetLength, size, d_isFound, d_message);
        // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // is the documented drop-in replacement.
        cudaDeviceSynchronize();
        cout<<"耗时:"<<(clock()-start)/CLK_TCK<<endl;
        printf("%s\n", cudaGetErrorString(cudaGetLastError()));
        // BUGFIX: copy the full size_t (was sizeof(int), which left the upper
        // half of h_isFound stale on 64-bit hosts).
        cudaMemcpy(&h_isFound, d_isFound, sizeof(size_t), cudaMemcpyDeviceToHost);
        // BUGFIX: %zu matches size_t (was %d).
        printf("####################### h_isFound = %zu\n", h_isFound);
        if (h_isFound != (size_t)-1) {
            printf("h_isFound=%zu\n", h_isFound);
            cudaMemcpy(h_message, d_message, 16 * sizeof(uchar), cudaMemcpyDeviceToHost);
            for (size_t i = 0; i != size; ++i){
                message.push_back(h_message[i]);
            }
            isFound = true;
            cout << message << endl;
            break;
        }
    }
    // Release device and host buffers.
    cudaFree(d_targetDigest);
    cudaFree(d_powerSymbols);
    cudaFree(d_powerValues);
    cudaFree(d_isFound);
    cudaFree(d_message);
    cudaFree(d_startNumbers);
    // BUGFIX: array form of delete for memory obtained with new[].
    delete[] h_startNumbers;
    cout<<"释放内存完毕..."<<endl;
    return make_pair(isFound, message);
}
61fdea0d59a2508b452b35b615796244a8d52b95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
////////////////////////////////////////////////////////////////////////////////
//
// NVIDIA CUDA implementation of Brox et al Optical Flow algorithm
//
// Algorithm is explained in the original paper:
// T. Brox, A. Bruhn, N. Papenberg, J. Weickert:
// High accuracy optical flow estimation based on a theory for warping.
// ECCV 2004.
//
// Implementation by Mikhail Smirnov
// email: msmirnov@nvidia.com, devsupport@nvidia.com
//
// Credits for help with the code to:
// Alexey Mendelenko, Anton Obukhov, and Alexander Kharlamov.
//
////////////////////////////////////////////////////////////////////////////////
#if !defined CUDA_DISABLER
#include <iostream>
#include <vector>
#include <memory>
#include "NPP_staging/NPP_staging.hpp"
#include "NCVBroxOpticalFlow.hpp"
#include "opencv2/gpu/device/utility.hpp"
typedef NCVVectorAlloc<Ncv32f> FloatVector;
/////////////////////////////////////////////////////////////////////////////////////////
// Implementation specific constants
/////////////////////////////////////////////////////////////////////////////////////////
__device__ const float eps2 = 1e-6f;
/////////////////////////////////////////////////////////////////////////////////////////
// Additional defines
/////////////////////////////////////////////////////////////////////////////////////////
// Ceiling of the integer quotient a / b (assumes a >= 0, b > 0).
inline int iDivUp(int a, int b)
{
    const int biased = a + b - 1;   // bias so any non-zero remainder rounds up
    return biased / b;
}
/////////////////////////////////////////////////////////////////////////////////////////
// Texture references
/////////////////////////////////////////////////////////////////////////////////////////
texture<float, 2, hipReadModeElementType> tex_coarse;
texture<float, 2, hipReadModeElementType> tex_fine;
texture<float, 2, hipReadModeElementType> tex_I1;
texture<float, 2, hipReadModeElementType> tex_I0;
texture<float, 2, hipReadModeElementType> tex_Ix;
texture<float, 2, hipReadModeElementType> tex_Ixx;
texture<float, 2, hipReadModeElementType> tex_Ix0;
texture<float, 2, hipReadModeElementType> tex_Iy;
texture<float, 2, hipReadModeElementType> tex_Iyy;
texture<float, 2, hipReadModeElementType> tex_Iy0;
texture<float, 2, hipReadModeElementType> tex_Ixy;
texture<float, 1, hipReadModeElementType> tex_u;
texture<float, 1, hipReadModeElementType> tex_v;
texture<float, 1, hipReadModeElementType> tex_du;
texture<float, 1, hipReadModeElementType> tex_dv;
texture<float, 1, hipReadModeElementType> tex_numerator_dudv;
texture<float, 1, hipReadModeElementType> tex_numerator_u;
texture<float, 1, hipReadModeElementType> tex_numerator_v;
texture<float, 1, hipReadModeElementType> tex_inv_denominator_u;
texture<float, 1, hipReadModeElementType> tex_inv_denominator_v;
texture<float, 1, hipReadModeElementType> tex_diffusivity_x;
texture<float, 1, hipReadModeElementType> tex_diffusivity_y;
/////////////////////////////////////////////////////////////////////////////////////////
// SUPPLEMENTARY FUNCTIONS
/////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/// \brief performs pointwise summation of two vectors stored in device memory
/// \param d_res - pointer to resulting vector (device memory)
/// \param d_op1 - term #1 (device memory)
/// \param d_op2 - term #2 (device memory)
/// \param len - vector size
///////////////////////////////////////////////////////////////////////////////
// Element-wise vector addition: d_res[i] = d_op1[i] + d_op2[i] for i < len.
// One element per thread; the grid may overshoot, hence the bounds guard.
__global__ void pointwise_add(float *d_res, const float *d_op1, const float *d_op2, const int len)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < len)
        d_res[idx] = d_op1[idx] + d_op2[idx];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief wrapper for summation kernel.
/// Computes \b op1 + \b op2 and stores result to \b res
/// \param res array, containing op1 + op2 (device memory)
/// \param op1 term #1 (device memory)
/// \param op2 term #2 (device memory)
/// \param count vector size
///////////////////////////////////////////////////////////////////////////////
static void add(float *res, const float *op1, const float *op2, const int count, hipStream_t stream)
{
// 256 threads per block; enough blocks to cover all `count` elements.
dim3 threads(256);
dim3 blocks(iDivUp(count, threads.x));
hipLaunchKernelGGL(( pointwise_add), dim3(blocks), dim3(threads), 0, stream, res, op1, op2, count);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief wrapper for summation kernel.
/// Increments \b res by \b rhs
/// \param res initial vector, will be replaced with result (device memory)
/// \param rhs increment (device memory)
/// \param count vector size
///////////////////////////////////////////////////////////////////////////////
static void add(float *res, const float *rhs, const int count, hipStream_t stream)
{
// In-place variant: res += rhs, element-wise, on the given stream.
add(res, res, rhs, count, stream);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief kernel for scaling vector by scalar
/// \param d_res scaled vector (device memory)
/// \param d_src source vector (device memory)
/// \param scale scalar to scale by
/// \param len vector size (number of elements)
///////////////////////////////////////////////////////////////////////////////
// Element-wise scaling kernel: d_res[i] = d_src[i] * scale for i < len.
// One element per thread; the grid may overshoot, hence the bounds guard.
__global__ void scaleVector(float *d_res, const float *d_src, float scale, const int len)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < len)
        d_res[idx] = d_src[idx] * scale;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief scale vector by scalar
///
/// kernel wrapper
/// \param d_res scaled vector (device memory)
/// \param d_src source vector (device memory)
/// \param scale scalar to scale by
/// \param len vector size (number of elements)
/// \param stream CUDA stream
///////////////////////////////////////////////////////////////////////////////
static void ScaleVector(float *d_res, const float *d_src, float scale, const int len, hipStream_t stream)
{
// 256 threads per block; enough blocks to cover all `len` elements.
dim3 threads(256);
dim3 blocks(iDivUp(len, threads.x));
hipLaunchKernelGGL(( scaleVector), dim3(blocks), dim3(threads), 0, stream, d_res, d_src, scale, len);
}
const int SOR_TILE_WIDTH = 32;
const int SOR_TILE_HEIGHT = 6;
const int PSOR_TILE_WIDTH = 32;
const int PSOR_TILE_HEIGHT = 6;
const int PSOR_PITCH = PSOR_TILE_WIDTH + 4;
const int PSOR_HEIGHT = PSOR_TILE_HEIGHT + 4;
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Compute smooth term diffusivity along x axis
///\param s (out) pointer to memory location for result (diffusivity)
///\param pos (in) position within shared memory array containing \b u
///\param u (in) shared memory array containing \b u
///\param v (in) shared memory array containing \b v
///\param du (in) shared memory array containing \b du
///\param dv (in) shared memory array containing \b dv
///////////////////////////////////////////////////////////////////////////////
// Compute the smoothness-term diffusivity between pixel (i,j) and its left
// neighbour (i-1,j), writing the result through `s`. Operates entirely on the
// padded shared-memory tiles u, v, du, dv (pitch PSOR_PITCH); `pos` indexes
// the current pixel within those tiles. `eps2` is the file-scope regularizer.
__forceinline__ __device__ void diffusivity_along_x(float *s, int pos, const float *u, const float *v, const float *du, const float *dv)
{
    // neighbour offsets within the padded shared-memory tile
    const int idx_l  = pos - 1;
    const int idx_u  = pos + PSOR_PITCH;
    const int idx_d  = pos - PSOR_PITCH;
    const int idx_ul = idx_u - 1;
    const int idx_dl = idx_d - 1;
    // x derivative between pixels (i,j) and (i-1,j)
    const float u_x = u[pos] + du[pos] - u[idx_l] - du[idx_l];
    const float v_x = v[pos] + dv[pos] - v[idx_l] - dv[idx_l];
    // y derivative at the midpoint, averaged over the two adjacent columns
    const float u_y = 0.25f*(u[idx_u] + du[idx_u] + u[idx_ul] + du[idx_ul] - u[idx_d] - du[idx_d] - u[idx_dl] - du[idx_dl]);
    const float v_y = 0.25f*(v[idx_u] + dv[idx_u] + v[idx_ul] + dv[idx_ul] - v[idx_d] - dv[idx_d] - v[idx_dl] - dv[idx_dl]);
    // psi' of the squared gradient magnitude (regularized by eps2)
    *s = 0.5f / sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2);
}
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Compute smooth term diffusivity along y axis
///\param s (out) pointer to memory location for result (diffusivity)
///\param pos (in) position within shared memory array containing \b u
///\param u (in) shared memory array containing \b u
///\param v (in) shared memory array containing \b v
///\param du (in) shared memory array containing \b du
///\param dv (in) shared memory array containing \b dv
///////////////////////////////////////////////////////////////////////////////
// Compute the smoothness-term diffusivity between pixel (i,j) and its lower
// neighbour (i,j-1), writing the result through `s`. Operates entirely on the
// padded shared-memory tiles u, v, du, dv (pitch PSOR_PITCH); `pos` indexes
// the current pixel within those tiles. `eps2` is the file-scope regularizer.
__forceinline__ __device__ void diffusivity_along_y(float *s, int pos, const float *u, const float *v, const float *du, const float *dv)
{
    // neighbour offsets within the padded shared-memory tile
    const int idx_d  = pos - PSOR_PITCH;
    const int idx_r  = pos + 1;
    const int idx_l  = pos - 1;
    const int idx_dr = idx_d + 1;
    const int idx_dl = idx_d - 1;
    // y derivative between pixels (i,j) and (i,j-1)
    const float u_y = u[pos] + du[pos] - u[idx_d] - du[idx_d];
    const float v_y = v[pos] + dv[pos] - v[idx_d] - dv[idx_d];
    // x derivative at the midpoint, averaged over the two adjacent rows
    const float u_x = 0.25f*(u[idx_r] + u[idx_dr] + du[idx_r] + du[idx_dr] - u[idx_l] - u[idx_dl] - du[idx_l] - du[idx_dl]);
    const float v_x = 0.25f*(v[idx_r] + v[idx_dr] + dv[idx_r] + dv[idx_dr] - v[idx_l] - v[idx_dl] - dv[idx_l] - dv[idx_dl]);
    // psi' of the squared gradient magnitude (regularized by eps2)
    *s = 0.5f/sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2);
}
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Load element of 2D global memory to shared memory
///\param smem pointer to shared memory array
///\param is shared memory array column
///\param js shared memory array row
///\param w number of columns in global memory array
///\param h number of rows in global memory array
///\param p global memory array pitch in floats
///////////////////////////////////////////////////////////////////////////////
// Fetch one element of a flow field from the texture selected by tex_id
// (0 -> tex_u, 1 -> tex_v, 2 -> tex_du, 3 -> tex_dv) and store it into the
// padded shared-memory tile at (is, js). Out-of-range source coordinates
// (i, j) are mirror-reflected across the image borders.
template<int tex_id>
__forceinline__ __device__ void load_array_element(float *smem, int is, int js, int i, int j, int w, int h, int p)
{
    // destination offset within the padded shared-memory tile
    const int smem_pos = js * PSOR_PITCH + is;
    // mirror reflection across left/right borders: -1 -> 0, w -> w-1, etc.
    i = max(i, -i-1);
    i = min(i, w-i+w-1);
    // mirror reflection across top/bottom borders
    j = max(j, -j-1);
    j = min(j, h-j+h-1);
    const int src_pos = j * p + i;
    // tex_id is a template argument, so this dispatch is resolved at compile time
    if (tex_id == 0)
        smem[smem_pos] = tex1Dfetch(tex_u, src_pos);
    else if (tex_id == 1)
        smem[smem_pos] = tex1Dfetch(tex_v, src_pos);
    else if (tex_id == 2)
        smem[smem_pos] = tex1Dfetch(tex_du, src_pos);
    else if (tex_id == 3)
        smem[smem_pos] = tex1Dfetch(tex_dv, src_pos);
}
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Load part (tile) of 2D global memory to shared memory
///\param smem pointer to target shared memory array
///\param ig column number within source
///\param jg row number within source
///\param w number of columns in global memory array
///\param h number of rows in global memory array
///\param p global memory array pitch in floats
///////////////////////////////////////////////////////////////////////////////
// Cooperatively load a (PSOR_TILE_WIDTH x PSOR_TILE_HEIGHT) tile plus its
// 2-pixel halo ("shadow") from the texture selected by `tex` into the padded
// shared-memory array `smem` (pitch PSOR_PITCH). Each thread loads its own
// pixel; edge threads additionally load halo rows/columns. Both __syncthreads()
// calls are outside all divergent branches, so the whole block reaches them.
template<int tex>
__forceinline__ __device__ void load_array(float *smem, int ig, int jg, int w, int h, int p)
{
// shared-memory coordinates of this thread's pixel (offset by the 2-pixel halo)
const int i = threadIdx.x + 2;
const int j = threadIdx.y + 2;
load_array_element<tex>(smem, i, j, ig, jg, w, h, p);//load current pixel
__syncthreads();
if(threadIdx.y < 2)
{
//load bottom shadow elements
load_array_element<tex>(smem, i, j-2, ig, jg-2, w, h, p);
if(threadIdx.x < 2)
{
//load bottom right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j-2, ig+PSOR_TILE_WIDTH, jg-2, w, h, p);
//load middle right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p);
}
else if(threadIdx.x >= PSOR_TILE_WIDTH-2)
{
//load bottom left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j-2, ig-PSOR_TILE_WIDTH, jg-2, w, h, p);
//load middle left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p);
}
}
else if(threadIdx.y >= PSOR_TILE_HEIGHT-2)
{
//load upper shadow elements
load_array_element<tex>(smem, i, j+2, ig, jg+2, w, h, p);
if(threadIdx.x < 2)
{
//load upper right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j+2, ig+PSOR_TILE_WIDTH, jg+2, w, h, p);
//load middle right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p);
}
else if(threadIdx.x >= PSOR_TILE_WIDTH-2)
{
//load upper left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j+2, ig-PSOR_TILE_WIDTH, jg+2, w, h, p);
//load middle left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p);
}
}
else
{
//load middle shadow elements
if(threadIdx.x < 2)
{
//load middle right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p);
}
else if(threadIdx.x >= PSOR_TILE_WIDTH-2)
{
//load middle left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p);
}
}
__syncthreads();
}
///////////////////////////////////////////////////////////////////////////////
/// \brief computes matrix of linearised system for \c du, \c dv
/// Computed values reside in GPU memory. \n
/// Matrix computation is divided into two steps. This kernel performs first step\n
/// - compute smoothness term diffusivity between pixels - psi dash smooth
/// - compute robustness factor in the data term - psi dash data
/// \param diffusivity_x (in/out) diffusivity between pixels along x axis in smoothness term
/// \param diffusivity_y (in/out) diffusivity between pixels along y axis in smoothness term
/// \param denominator_u (in/out) precomputed part of expression for new du value in SOR iteration
/// \param denominator_v (in/out) precomputed part of expression for new dv value in SOR iteration
/// \param numerator_dudv (in/out) precomputed part of expression for new du and dv value in SOR iteration
/// \param numerator_u (in/out) precomputed part of expression for new du value in SOR iteration
/// \param numerator_v (in/out) precomputed part of expression for new dv value in SOR iteration
/// \param w (in) frame width
/// \param h (in) frame height
/// \param pitch (in) pitch in floats
/// \param alpha (in) alpha in Brox model (flow smoothness)
/// \param gamma (in) gamma in Brox model (edge importance)
///////////////////////////////////////////////////////////////////////////////
// Stage 1 of SOR matrix preparation: for each pixel, compute the data-term
// robustness factor (psi' data) and the smoothness-term diffusivities
// (psi' smooth) between neighbouring pixels, and precompute the numerator/
// denominator pieces used by the SOR solver. Launched with
// PSOR_TILE_WIDTH x PSOR_TILE_HEIGHT blocks; reads u/v/du/dv through 1D
// textures and the image/derivative fields through normalized 2D textures.
__global__ void prepare_sor_stage_1_tex(float *diffusivity_x, float *diffusivity_y,
float *denominator_u, float *denominator_v,
float *numerator_dudv,
float *numerator_u, float *numerator_v,
int w, int h, int s,
float alpha, float gamma)
{
// padded shared-memory tiles (2-pixel halo) for flow and flow increments
__shared__ float u[PSOR_PITCH * PSOR_HEIGHT];
__shared__ float v[PSOR_PITCH * PSOR_HEIGHT];
__shared__ float du[PSOR_PITCH * PSOR_HEIGHT];
__shared__ float dv[PSOR_PITCH * PSOR_HEIGHT];
//position within tile
const int i = threadIdx.x;
const int j = threadIdx.y;
//position within smem arrays
const int ijs = (j+2) * PSOR_PITCH + i + 2;
//position within global memory
const int ig = blockIdx.x * blockDim.x + threadIdx.x;
const int jg = blockIdx.y * blockDim.y + threadIdx.y;
const int ijg = jg * s + ig;
//position within texture
float x = (float)ig + 0.5f;
float y = (float)jg + 0.5f;
//load u and v to smem
// NOTE: all threads (including out-of-range ones) must execute these, since
// load_array contains block-wide __syncthreads() barriers
load_array<0>(u, ig, jg, w, h, s);
load_array<1>(v, ig, jg, w, h, s);
load_array<2>(du, ig, jg, w, h, s);
load_array<3>(dv, ig, jg, w, h, s);
//warped position
// normalized coordinates in frame 1, displaced by the current flow estimate
float wx = (x + u[ijs])/(float)w;
float wy = (y + v[ijs])/(float)h;
x /= (float)w;
y /= (float)h;
//compute image derivatives
// Iz, Ixz, Iyz are temporal differences between warped frame 1 and frame 0
const float Iz = tex2D(tex_I1, wx, wy) - tex2D(tex_I0, x, y);
const float Ix = tex2D(tex_Ix, wx, wy);
const float Ixz = Ix - tex2D(tex_Ix0, x, y);
const float Ixy = tex2D(tex_Ixy, wx, wy);
const float Ixx = tex2D(tex_Ixx, wx, wy);
const float Iy = tex2D(tex_Iy, wx, wy);
const float Iyz = Iy - tex2D(tex_Iy0, x, y);
const float Iyy = tex2D(tex_Iyy, wx, wy);
//compute data term
// q0..q2: linearised brightness and gradient constancy residuals
float q0, q1, q2;
q0 = Iz + Ix * du[ijs] + Iy * dv[ijs];
q1 = Ixz + Ixx * du[ijs] + Ixy * dv[ijs];
q2 = Iyz + Ixy * du[ijs] + Iyy * dv[ijs];
float data_term = 0.5f * rsqrtf(q0*q0 + gamma*(q1*q1 + q2*q2) + eps2);
//scale data term by 1/alpha
data_term /= alpha;
//compute smoothness term (diffusivity)
float sx, sy;
// safe to return here: no further barriers are executed below
if(ig >= w || jg >= h) return;
diffusivity_along_x(&sx, ijs, u, v, du, dv);
diffusivity_along_y(&sy, ijs, u, v, du, dv);
// no left/bottom neighbour at the image border -> zero diffusivity (Neumann BC)
if(ig == 0) sx = 0.0f;
if(jg == 0) sy = 0.0f;
numerator_dudv[ijg] = data_term * (Ix*Iy + gamma * Ixy*(Ixx + Iyy));
numerator_u[ijg] = data_term * (Ix*Iz + gamma * (Ixx*Ixz + Ixy*Iyz));
numerator_v[ijg] = data_term * (Iy*Iz + gamma * (Iyy*Iyz + Ixy*Ixz));
denominator_u[ijg] = data_term * (Ix*Ix + gamma * (Ixy*Ixy + Ixx*Ixx));
denominator_v[ijg] = data_term * (Iy*Iy + gamma * (Ixy*Ixy + Iyy*Iyy));
diffusivity_x[ijg] = sx;
diffusivity_y[ijg] = sy;
}
///////////////////////////////////////////////////////////////////////////////
///\brief computes matrix of linearised system for \c du, \c dv
///\param inv_denominator_u
///\param inv_denominator_v
///\param w
///\param h
///\param s
///////////////////////////////////////////////////////////////////////////////
// Stage 2 of SOR matrix preparation: add the sum of the four neighbouring
// diffusivities to each pixel's denominator and store its reciprocal, so the
// SOR kernel can multiply instead of divide. Diffusivities are read through
// tex_diffusivity_x / tex_diffusivity_y into shared memory with a one-element
// right/top halo per tile.
__global__ void prepare_sor_stage_2(float *inv_denominator_u, float *inv_denominator_v,
int w, int h, int s)
{
// per-tile diffusivity caches, padded by one column (sx) / one row (sy)
__shared__ float sx[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)];
__shared__ float sy[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)];
//position within tile
const int i = threadIdx.x;
const int j = threadIdx.y;
//position within smem arrays
const int ijs = j*(PSOR_TILE_WIDTH+1) + i;
//position within global memory
const int ig = blockIdx.x * blockDim.x + threadIdx.x;
const int jg = blockIdx.y * blockDim.y + threadIdx.y;
const int ijg = jg*s + ig;
// out-of-range threads still run (for the barrier) but neither read nor write
int inside = ig < w && jg < h;
float denom_u;
float denom_v;
if(inside)
{
denom_u = inv_denominator_u[ijg];
denom_v = inv_denominator_v[ijg];
}
if(inside)
{
sx[ijs] = tex1Dfetch(tex_diffusivity_x, ijg);
sy[ijs] = tex1Dfetch(tex_diffusivity_y, ijg);
}
else
{
sx[ijs] = 0.0f;
sy[ijs] = 0.0f;
}
// last tile row also fetches the halo row above (diffusivity of the upper neighbour)
int up = ijs+PSOR_TILE_WIDTH+1;
if(j == PSOR_TILE_HEIGHT-1)
{
if(jg < h-1 && inside)
{
sy[up] = tex1Dfetch(tex_diffusivity_y, ijg + s);
}
else
{
sy[up] = 0.0f;
}
}
// last tile column also fetches the halo column to the right
int right = ijs + 1;
if(threadIdx.x == PSOR_TILE_WIDTH-1)
{
if(ig < w-1 && inside)
{
sx[right] = tex1Dfetch(tex_diffusivity_x, ijg + 1);
}
else
{
sx[right] = 0.0f;
}
}
__syncthreads();
// sum of diffusivities to the left, right, below and above this pixel
float diffusivity_sum;
diffusivity_sum = sx[ijs] + sx[ijs+1] + sy[ijs] + sy[ijs+PSOR_TILE_WIDTH+1];
if(inside)
{
denom_u += diffusivity_sum;
denom_v += diffusivity_sum;
// store reciprocals so the solver can use multiplications
inv_denominator_u[ijg] = 1.0f/denom_u;
inv_denominator_v[ijg] = 1.0f/denom_v;
}
}
/////////////////////////////////////////////////////////////////////////////////////////
// Red-Black SOR
/////////////////////////////////////////////////////////////////////////////////////////
// One half-iteration of red-black successive over-relaxation. The template
// parameter isBlack (0 or 1) selects which checkerboard colour is updated;
// pixels of the other colour are copied through unchanged. Flow (u, v) and
// increments (du, dv) are read via 1D textures; diffusivities via
// tex_diffusivity_x / tex_diffusivity_y. Border neighbours use Neumann
// (zero-diffusivity / clamped-index) boundary conditions.
template<int isBlack> __global__ void sor_pass(float *new_du,
float *new_dv,
const float *g_inv_denominator_u,
const float *g_inv_denominator_v,
const float *g_numerator_u,
const float *g_numerator_v,
const float *g_numerator_dudv,
float omega,
int width,
int height,
int stride)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i >= width || j >= height)
return;
const int pos = j * stride + i;
// neighbour indices, clamped to the current pixel at the image borders
const int pos_r = i < width - 1 ? pos + 1 : pos;
const int pos_u = j < height - 1 ? pos + stride : pos;
const int pos_d = j > 0 ? pos - stride : pos;
const int pos_l = i > 0 ? pos - 1 : pos;
//load smooth term
float s_up, s_left, s_right, s_down;
s_left = tex1Dfetch(tex_diffusivity_x, pos);
s_down = tex1Dfetch(tex_diffusivity_y, pos);
if(i < width-1)
s_right = tex1Dfetch(tex_diffusivity_x, pos_r);
else
s_right = 0.0f; //Neumann BC
if(j < height-1)
s_up = tex1Dfetch(tex_diffusivity_y, pos_u);
else
s_up = 0.0f; //Neumann BC
//load u, v and du, dv
float u_up, u_left, u_right, u_down, u;
float v_up, v_left, v_right, v_down, v;
float du_up, du_left, du_right, du_down, du;
float dv_up, dv_left, dv_right, dv_down, dv;
u_left = tex1Dfetch(tex_u, pos_l);
u_right = tex1Dfetch(tex_u, pos_r);
u_down = tex1Dfetch(tex_u, pos_d);
u_up = tex1Dfetch(tex_u, pos_u);
u = tex1Dfetch(tex_u, pos);
v_left = tex1Dfetch(tex_v, pos_l);
v_right = tex1Dfetch(tex_v, pos_r);
v_down = tex1Dfetch(tex_v, pos_d);
v = tex1Dfetch(tex_v, pos);
v_up = tex1Dfetch(tex_v, pos_u);
du = tex1Dfetch(tex_du, pos);
du_left = tex1Dfetch(tex_du, pos_l);
du_right = tex1Dfetch(tex_du, pos_r);
du_down = tex1Dfetch(tex_du, pos_d);
du_up = tex1Dfetch(tex_du, pos_u);
dv = tex1Dfetch(tex_dv, pos);
dv_left = tex1Dfetch(tex_dv, pos_l);
dv_right = tex1Dfetch(tex_dv, pos_r);
dv_down = tex1Dfetch(tex_dv, pos_d);
dv_up = tex1Dfetch(tex_dv, pos_u);
float numerator_dudv = g_numerator_dudv[pos];
// only update pixels of the selected checkerboard colour
if((i+j)%2 == isBlack)
{
// update du
float numerator_u = (s_left*(u_left + du_left) + s_up*(u_up + du_up) + s_right*(u_right + du_right) + s_down*(u_down + du_down) -
u * (s_left + s_right + s_up + s_down) - g_numerator_u[pos] - numerator_dudv*dv);
// over-relaxed Gauss-Seidel update: blend old value with the new estimate
du = (1.0f - omega) * du + omega * g_inv_denominator_u[pos] * numerator_u;
// update dv
// note: uses the du value just updated above (Gauss-Seidel coupling)
float numerator_v = (s_left*(v_left + dv_left) + s_up*(v_up + dv_up) + s_right*(v_right + dv_right) + s_down*(v_down + dv_down) -
v * (s_left + s_right + s_up + s_down) - g_numerator_v[pos] - numerator_dudv*du);
dv = (1.0f - omega) * dv + omega * g_inv_denominator_v[pos] * numerator_v;
}
// pixels of the other colour write back their unchanged values
new_du[pos] = du;
new_dv[pos] = dv;
}
///////////////////////////////////////////////////////////////////////////////
// utility functions
///////////////////////////////////////////////////////////////////////////////
// Configure a 1D float texture reference for raw linear-memory fetches:
// unnormalized integer coordinates, nearest-point sampling, clamped addressing.
void initTexture1D(texture<float, 1, hipReadModeElementType> &tex)
{
    tex.normalized = false;
    tex.filterMode = hipFilterModePoint;
    tex.addressMode[0] = hipAddressModeClamp;
}
// Configure a 2D float texture reference for image sampling:
// normalized [0,1] coordinates, bilinear filtering, mirror addressing on both axes.
void initTexture2D(texture<float, 2, hipReadModeElementType> &tex)
{
    tex.normalized = true;
    tex.filterMode = hipFilterModeLinear;
    tex.addressMode[0] = hipAddressModeMirror;
    tex.addressMode[1] = hipAddressModeMirror;
}
// Initialize sampling parameters for every texture reference used by the
// Brox optical flow pipeline: 2D textures for images/derivatives, 1D textures
// for flow fields and linear-system coefficients.
void InitTextures()
{
// input frames and resampling sources
initTexture2D(tex_I0);
initTexture2D(tex_I1);
initTexture2D(tex_fine); // for downsampling
initTexture2D(tex_coarse); // for prolongation
// spatial image derivatives
initTexture2D(tex_Ix);
initTexture2D(tex_Ixx);
initTexture2D(tex_Ix0);
initTexture2D(tex_Iy);
initTexture2D(tex_Iyy);
initTexture2D(tex_Iy0);
initTexture2D(tex_Ixy);
// flow fields and increments
initTexture1D(tex_u);
initTexture1D(tex_v);
initTexture1D(tex_du);
initTexture1D(tex_dv);
// SOR linear-system coefficients
initTexture1D(tex_diffusivity_x);
initTexture1D(tex_diffusivity_y);
initTexture1D(tex_inv_denominator_u);
initTexture1D(tex_inv_denominator_v);
initTexture1D(tex_numerator_dudv);
initTexture1D(tex_numerator_u);
initTexture1D(tex_numerator_v);
}
namespace
{
    // Owning container for the multi-resolution image pyramid: per-level
    // device buffers for both frames plus the level dimensions. The
    // destructor releases the level buffers (it owns the raw pointers).
    struct ImagePyramid
    {
        std::vector<FloatVector*> img0;
        std::vector<FloatVector*> img1;
        std::vector<Ncv32u> w;
        std::vector<Ncv32u> h;
        explicit ImagePyramid(int outer_iterations)
        {
            // at most one pyramid level per outer iteration
            img0.reserve(outer_iterations);
            img1.reserve(outer_iterations);
            w.reserve(outer_iterations);
            h.reserve(outer_iterations);
        }
        ~ImagePyramid()
        {
            w.clear();
            h.clear();
            // free levels coarsest-first (reverse of construction order)
            while (!img0.empty())
            {
                delete img1.back();
                delete img0.back();
                img1.pop_back();
                img0.pop_back();
            }
        }
    };
}
/////////////////////////////////////////////////////////////////////////////////////////
// MAIN FUNCTION
/////////////////////////////////////////////////////////////////////////////////////////
NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
INCVMemAllocator &gpu_mem_allocator,
const NCVMatrix<Ncv32f> &frame0,
const NCVMatrix<Ncv32f> &frame1,
NCVMatrix<Ncv32f> &uOut,
NCVMatrix<Ncv32f> &vOut,
hipStream_t stream)
{
ncvAssertPrintReturn(desc.alpha > 0.0f , "Invalid alpha" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.gamma >= 0.0f , "Invalid gamma" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.number_of_inner_iterations > 0 , "Invalid number of inner iterations" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.number_of_outer_iterations > 0 , "Invalid number of outer iterations" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.number_of_solver_iterations > 0, "Invalid number of solver iterations", NCV_INCONSISTENT_INPUT);
const Ncv32u kSourceWidth = frame0.width();
const Ncv32u kSourceHeight = frame0.height();
ncvAssertPrintReturn(frame1.width() == kSourceWidth && frame1.height() == kSourceHeight, "Frame dims do not match", NCV_INCONSISTENT_INPUT);
ncvAssertReturn(uOut.width() == kSourceWidth && vOut.width() == kSourceWidth &&
uOut.height() == kSourceHeight && vOut.height() == kSourceHeight, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(gpu_mem_allocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
bool kSkipProcessing = gpu_mem_allocator.isCounting();
int cuda_device;
ncvAssertCUDAReturn(hipGetDevice(&cuda_device), NCV_CUDA_ERROR);
hipDeviceProp_t device_props;
ncvAssertCUDAReturn(hipGetDeviceProperties(&device_props, cuda_device), NCV_CUDA_ERROR);
Ncv32u alignmentValue = gpu_mem_allocator.alignment ();
const Ncv32u kStrideAlignmentFloat = alignmentValue / sizeof(float);
const Ncv32u kSourcePitch = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float);
const Ncv32f scale_factor = desc.scale_factor;
const Ncv32f alpha = desc.alpha;
const Ncv32f gamma = desc.gamma;
const Ncv32u kSizeInPixelsAligned = alignUp(kSourceWidth, kStrideAlignmentFloat)*kSourceHeight;
#if defined SAFE_VECTOR_DECL
#undef SAFE_VECTOR_DECL
#endif
#define SAFE_VECTOR_DECL(name, allocator, size) \
FloatVector name((allocator), (size)); \
ncvAssertReturn(name.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
// matrix elements
SAFE_VECTOR_DECL(diffusivity_x, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(diffusivity_y, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(denom_u, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(denom_v, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(num_dudv, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(num_u, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(num_v, gpu_mem_allocator, kSizeInPixelsAligned);
// flow components
SAFE_VECTOR_DECL(u, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(v, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(u_new, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(v_new, gpu_mem_allocator, kSizeInPixelsAligned);
// flow increments
SAFE_VECTOR_DECL(du, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(dv, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(du_new, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(dv_new, gpu_mem_allocator, kSizeInPixelsAligned);
// temporary storage
SAFE_VECTOR_DECL(device_buffer, gpu_mem_allocator,
alignUp(kSourceWidth, kStrideAlignmentFloat) * alignUp(kSourceHeight, kStrideAlignmentFloat));
// image derivatives
SAFE_VECTOR_DECL(Ix, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Ixx, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Ix0, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Iy, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Iyy, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Iy0, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Ixy, gpu_mem_allocator, kSizeInPixelsAligned);
// spatial derivative filter size
const int kDFilterSize = 5;
SAFE_VECTOR_DECL(derivativeFilter, gpu_mem_allocator, kDFilterSize);
if (!kSkipProcessing)
{
const float derivativeFilterHost[kDFilterSize] = {1.0f, -8.0f, 0.0f, 8.0f, -1.0f};
ncvAssertCUDAReturn(hipMemcpy(derivativeFilter.ptr(), derivativeFilterHost, sizeof(float) * kDFilterSize,
hipMemcpyHostToDevice), NCV_CUDA_ERROR);
InitTextures();
}
//prepare image pyramid
ImagePyramid pyr(desc.number_of_outer_iterations);
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>();
float scale = 1.0f;
//cuda arrays for frames
std::auto_ptr<FloatVector> pI0(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
ncvAssertReturn(pI0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
std::auto_ptr<FloatVector> pI1(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
ncvAssertReturn(pI1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
if (!kSkipProcessing)
{
//copy frame data to device
size_t dst_width_in_bytes = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float);
size_t src_width_in_bytes = kSourceWidth * sizeof(float);
size_t src_pitch_in_bytes = frame0.pitch();
ncvAssertCUDAReturn( hipMemcpy2DAsync(pI0->ptr(), dst_width_in_bytes, frame0.ptr(),
src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, hipMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR );
ncvAssertCUDAReturn( hipMemcpy2DAsync(pI1->ptr(), dst_width_in_bytes, frame1.ptr(),
src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, hipMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR );
}
FloatVector* I0 = pI0.release();
FloatVector* I1 = pI1.release();
//prepare pyramid
pyr.img0.push_back(I0);
pyr.img1.push_back(I1);
pyr.w.push_back(kSourceWidth);
pyr.h.push_back(kSourceHeight);
scale *= scale_factor;
Ncv32u prev_level_width = kSourceWidth;
Ncv32u prev_level_height = kSourceHeight;
while((prev_level_width > 15) && (prev_level_height > 15) && (static_cast<Ncv32u>(pyr.img0.size()) < desc.number_of_outer_iterations))
{
//current resolution
Ncv32u level_width = static_cast<Ncv32u>(ceilf(kSourceWidth * scale));
Ncv32u level_height = static_cast<Ncv32u>(ceilf(kSourceHeight * scale));
Ncv32u level_width_aligned = alignUp(level_width, kStrideAlignmentFloat);
Ncv32u buffer_size = alignUp(level_width, kStrideAlignmentFloat) * level_height; // buffer size in floats
Ncv32u prev_level_pitch = alignUp(prev_level_width, kStrideAlignmentFloat) * sizeof(float);
std::auto_ptr<FloatVector> level_frame0(new FloatVector(gpu_mem_allocator, buffer_size));
ncvAssertReturn(level_frame0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
std::auto_ptr<FloatVector> level_frame1(new FloatVector(gpu_mem_allocator, buffer_size));
ncvAssertReturn(level_frame1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
if (!kSkipProcessing)
{
ncvAssertCUDAReturn(hipStreamSynchronize(stream), NCV_CUDA_ERROR);
NcvSize32u srcSize (prev_level_width, prev_level_height);
NcvSize32u dstSize (level_width, level_height);
NcvRect32u srcROI (0, 0, prev_level_width, prev_level_height);
NcvRect32u dstROI (0, 0, level_width, level_height);
// frame 0
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I0->ptr(), srcSize, prev_level_pitch, srcROI,
level_frame0->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) );
// frame 1
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I1->ptr(), srcSize, prev_level_pitch, srcROI,
level_frame1->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) );
}
I0 = level_frame0.release();
I1 = level_frame1.release();
//store pointers
pyr.img0.push_back(I0);
pyr.img1.push_back(I1);
pyr.w.push_back(level_width);
pyr.h.push_back(level_height);
scale *= scale_factor;
prev_level_width = level_width;
prev_level_height = level_height;
}
if (!kSkipProcessing)
{
//initial values for flow is 0
ncvAssertCUDAReturn(hipMemsetAsync(u.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemsetAsync(v.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR);
//select images with lowest resolution
size_t pitch = alignUp(pyr.w.back(), kStrideAlignmentFloat) * sizeof(float);
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_I0, pyr.img0.back()->ptr(), channel_desc, pyr.w.back(), pyr.h.back(), pitch), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_I1, pyr.img1.back()->ptr(), channel_desc, pyr.w.back(), pyr.h.back(), pitch), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(stream), NCV_CUDA_ERROR);
FloatVector* ptrU = &u;
FloatVector* ptrV = &v;
FloatVector* ptrUNew = &u_new;
FloatVector* ptrVNew = &v_new;
std::vector<FloatVector*>::const_reverse_iterator img0Iter = pyr.img0.rbegin();
std::vector<FloatVector*>::const_reverse_iterator img1Iter = pyr.img1.rbegin();
//outer loop
//warping fixed point iteration
while(!pyr.w.empty())
{
//current grid dimensions
const Ncv32u kLevelWidth = pyr.w.back();
const Ncv32u kLevelHeight = pyr.h.back();
const Ncv32u kLevelStride = alignUp(kLevelWidth, kStrideAlignmentFloat);
//size of current image in bytes
const int kLevelSizeInBytes = kLevelStride * kLevelHeight * sizeof(float);
//number of points at current resolution
const int kLevelSizeInPixels = kLevelStride * kLevelHeight;
//initial guess for du and dv
ncvAssertCUDAReturn(hipMemsetAsync(du.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemsetAsync(dv.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR);
//texture format descriptor
hipChannelFormatDesc ch_desc = hipCreateChannelDesc<float>();
I0 = *img0Iter;
I1 = *img1Iter;
++img0Iter;
++img1Iter;
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_I0, I0->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_I1, I1->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
//compute derivatives
dim3 dBlocks(iDivUp(kLevelWidth, 32), iDivUp(kLevelHeight, 6));
dim3 dThreads(32, 6);
const int kPitchTex = kLevelStride * sizeof(float);
NcvSize32u srcSize(kLevelWidth, kLevelHeight);
Ncv32u nSrcStep = kLevelStride * sizeof(float);
NcvRect32u oROI(0, 0, kLevelWidth, kLevelHeight);
// Ix0
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Ix0.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Iy0
ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Iy0.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Ix
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Ix.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Iy
ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Iy.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Ixx
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Ix.ptr(), srcSize, nSrcStep, Ixx.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Iyy
ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Iyy.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Ixy
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Ixy.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_Ix, Ix.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_Ixx, Ixx.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_Ix0, Ix0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_Iy, Iy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_Iyy, Iyy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_Iy0, Iy0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture2D(0, tex_Ixy, Ixy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
// flow
ncvAssertCUDAReturn(hipBindTexture(0, tex_u, ptrU->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_v, ptrV->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
// flow increments
ncvAssertCUDAReturn(hipBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
dim3 psor_blocks(iDivUp(kLevelWidth, PSOR_TILE_WIDTH), iDivUp(kLevelHeight, PSOR_TILE_HEIGHT));
dim3 psor_threads(PSOR_TILE_WIDTH, PSOR_TILE_HEIGHT);
dim3 sor_blocks(iDivUp(kLevelWidth, SOR_TILE_WIDTH), iDivUp(kLevelHeight, SOR_TILE_HEIGHT));
dim3 sor_threads(SOR_TILE_WIDTH, SOR_TILE_HEIGHT);
// inner loop
// lagged nonlinearity fixed point iteration
ncvAssertCUDAReturn(hipStreamSynchronize(stream), NCV_CUDA_ERROR);
for (Ncv32u current_inner_iteration = 0; current_inner_iteration < desc.number_of_inner_iterations; ++current_inner_iteration)
{
//compute coefficients
hipLaunchKernelGGL(( prepare_sor_stage_1_tex), dim3(psor_blocks), dim3(psor_threads), 0, stream,
diffusivity_x.ptr(),
diffusivity_y.ptr(),
denom_u.ptr(),
denom_v.ptr(),
num_dudv.ptr(),
num_u.ptr(),
num_v.ptr(),
kLevelWidth,
kLevelHeight,
kLevelStride,
alpha,
gamma);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
hipLaunchKernelGGL(( prepare_sor_stage_2), dim3(psor_blocks), dim3(psor_threads), 0, stream, denom_u.ptr(), denom_v.ptr(), kLevelWidth, kLevelHeight, kLevelStride);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
// linear system coefficients
ncvAssertCUDAReturn(hipBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_inv_denominator_u, denom_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_inv_denominator_v, denom_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
//solve linear system
for (Ncv32u solver_iteration = 0; solver_iteration < desc.number_of_solver_iterations; ++solver_iteration)
{
float omega = 1.99f;
ncvAssertCUDAReturn(hipBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
hipLaunchKernelGGL(( sor_pass<0>), dim3(sor_blocks), dim3(sor_threads), 0, stream,
du_new.ptr(),
dv_new.ptr(),
denom_u.ptr(),
denom_v.ptr(),
num_u.ptr(),
num_v.ptr(),
num_dudv.ptr(),
omega,
kLevelWidth,
kLevelHeight,
kLevelStride);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_du, du_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_dv, dv_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
hipLaunchKernelGGL(( sor_pass<1>), dim3(sor_blocks), dim3(sor_threads), 0, stream,
du.ptr(),
dv.ptr(),
denom_u.ptr(),
denom_v.ptr(),
num_u.ptr(),
num_v.ptr(),
num_dudv.ptr(),
omega,
kLevelWidth,
kLevelHeight,
kLevelStride);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
}//end of solver loop
}// end of inner loop
//update u and v
add(ptrU->ptr(), du.ptr(), kLevelSizeInPixels, stream);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
add(ptrV->ptr(), dv.ptr(), kLevelSizeInPixels, stream);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
//prolongate using texture
pyr.w.pop_back();
pyr.h.pop_back();
if (!pyr.w.empty())
{
//compute new image size
Ncv32u nw = pyr.w.back();
Ncv32u nh = pyr.h.back();
Ncv32u ns = alignUp(nw, kStrideAlignmentFloat);
dim3 p_blocks(iDivUp(nw, 32), iDivUp(nh, 8));
dim3 p_threads(32, 8);
NcvSize32u inner_srcSize (kLevelWidth, kLevelHeight);
NcvSize32u dstSize (nw, nh);
NcvRect32u srcROI (0, 0, kLevelWidth, kLevelHeight);
NcvRect32u dstROI (0, 0, nw, nh);
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrU->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI,
ptrUNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) );
ScaleVector(ptrUNew->ptr(), ptrUNew->ptr(), 1.0f/scale_factor, ns * nh, stream);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrV->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI,
ptrVNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) );
ScaleVector(ptrVNew->ptr(), ptrVNew->ptr(), 1.0f/scale_factor, ns * nh, stream);
ncvAssertCUDALastErrorReturn((int)NCV_CUDA_ERROR);
cv::gpu::device::swap<FloatVector*>(ptrU, ptrUNew);
cv::gpu::device::swap<FloatVector*>(ptrV, ptrVNew);
}
scale /= scale_factor;
}
// end of warping iterations
ncvAssertCUDAReturn(hipStreamSynchronize(stream), (int)NCV_CUDA_ERROR);
ncvAssertCUDAReturn( hipMemcpy2DAsync
(uOut.ptr(), uOut.pitch(), ptrU->ptr(),
kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, hipMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR );
ncvAssertCUDAReturn( hipMemcpy2DAsync
(vOut.ptr(), vOut.pitch(), ptrV->ptr(),
kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, hipMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR );
ncvAssertCUDAReturn(hipStreamSynchronize(stream), (int)NCV_CUDA_ERROR);
}
return NCV_SUCCESS;
}
#endif /* CUDA_DISABLER */ | 61fdea0d59a2508b452b35b615796244a8d52b95.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
////////////////////////////////////////////////////////////////////////////////
//
// NVIDIA CUDA implementation of Brox et al Optical Flow algorithm
//
// Algorithm is explained in the original paper:
// T. Brox, A. Bruhn, N. Papenberg, J. Weickert:
// High accuracy optical flow estimation based on a theory for warping.
// ECCV 2004.
//
// Implementation by Mikhail Smirnov
// email: msmirnov@nvidia.com, devsupport@nvidia.com
//
// Credits for help with the code to:
// Alexey Mendelenko, Anton Obukhov, and Alexander Kharlamov.
//
////////////////////////////////////////////////////////////////////////////////
#if !defined CUDA_DISABLER
#include <iostream>
#include <vector>
#include <memory>
#include "NPP_staging/NPP_staging.hpp"
#include "NCVBroxOpticalFlow.hpp"
#include "opencv2/gpu/device/utility.hpp"
typedef NCVVectorAlloc<Ncv32f> FloatVector; // device-allocated vector of 32-bit floats

/////////////////////////////////////////////////////////////////////////////////////////
// Implementation specific constants
/////////////////////////////////////////////////////////////////////////////////////////

// Regularization epsilon added under the square roots of the robust penalizers
// (smoothness diffusivity and data term) so they stay differentiable and the
// divisions never hit zero.
__device__ const float eps2 = 1e-6f;

/////////////////////////////////////////////////////////////////////////////////////////
// Additional defines
/////////////////////////////////////////////////////////////////////////////////////////
inline int iDivUp(int a, int b)
{
return (a + b - 1)/b;
}
/////////////////////////////////////////////////////////////////////////////////////////
// Texture references
/////////////////////////////////////////////////////////////////////////////////////////

// 2D textures over image-sized arrays (configured by initTexture2D: normalized
// coordinates, bilinear filtering, mirrored borders).
texture<float, 2, cudaReadModeElementType> tex_coarse; // coarse pyramid level (prolongation source)
texture<float, 2, cudaReadModeElementType> tex_fine;   // fine pyramid level (downsampling source)
texture<float, 2, cudaReadModeElementType> tex_I1;     // second frame
texture<float, 2, cudaReadModeElementType> tex_I0;     // first frame
texture<float, 2, cudaReadModeElementType> tex_Ix;     // dI/dx of warped frame 1
texture<float, 2, cudaReadModeElementType> tex_Ixx;    // d2I/dx2
texture<float, 2, cudaReadModeElementType> tex_Ix0;    // dI/dx of frame 0
texture<float, 2, cudaReadModeElementType> tex_Iy;     // dI/dy of warped frame 1
texture<float, 2, cudaReadModeElementType> tex_Iyy;    // d2I/dy2
texture<float, 2, cudaReadModeElementType> tex_Iy0;    // dI/dy of frame 0
texture<float, 2, cudaReadModeElementType> tex_Ixy;    // mixed derivative d2I/dxdy

// 1D textures over linear device buffers (configured by initTexture1D: element
// indexing, point sampling, clamped addressing). Rebound per solver stage.
texture<float, 1, cudaReadModeElementType> tex_u;
texture<float, 1, cudaReadModeElementType> tex_v;
texture<float, 1, cudaReadModeElementType> tex_du;
texture<float, 1, cudaReadModeElementType> tex_dv;
texture<float, 1, cudaReadModeElementType> tex_numerator_dudv;
texture<float, 1, cudaReadModeElementType> tex_numerator_u;
texture<float, 1, cudaReadModeElementType> tex_numerator_v;
texture<float, 1, cudaReadModeElementType> tex_inv_denominator_u;
texture<float, 1, cudaReadModeElementType> tex_inv_denominator_v;
texture<float, 1, cudaReadModeElementType> tex_diffusivity_x;
texture<float, 1, cudaReadModeElementType> tex_diffusivity_y;
/////////////////////////////////////////////////////////////////////////////////////////
// SUPPLEMENTARY FUNCTIONS
/////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/// \brief element-wise sum kernel: d_res[i] = d_op1[i] + d_op2[i]
/// One thread per element; threads past \b len do nothing.
/// \param d_res resulting vector (device memory)
/// \param d_op1 first operand (device memory)
/// \param d_op2 second operand (device memory)
/// \param len   number of elements
///////////////////////////////////////////////////////////////////////////////
__global__ void pointwise_add(float *d_res, const float *d_op1, const float *d_op2, const int len)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < len)
    {
        d_res[idx] = d_op1[idx] + d_op2[idx];
    }
}
///////////////////////////////////////////////////////////////////////////////
/// \brief host wrapper for the summation kernel.
/// Enqueues res = op1 + op2 on \b stream (asynchronous with respect to host).
/// \param res    output array (device memory)
/// \param op1    first operand (device memory)
/// \param op2    second operand (device memory)
/// \param count  number of elements
/// \param stream CUDA stream the kernel is launched on
///////////////////////////////////////////////////////////////////////////////
static void add(float *res, const float *op1, const float *op2, const int count, cudaStream_t stream)
{
    const int block_size = 256;
    const int grid_size = iDivUp(count, block_size);
    pointwise_add<<<grid_size, block_size, 0, stream>>>(res, op1, op2, count);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief host wrapper: in-place element-wise increment, res += rhs.
/// Delegates to the three-operand overload with \b res as both input and output.
/// \param res    vector to increment, also receives the result (device memory)
/// \param rhs    increment (device memory)
/// \param count  number of elements
/// \param stream CUDA stream the kernel is launched on
///////////////////////////////////////////////////////////////////////////////
static void add(float *res, const float *rhs, const int count, cudaStream_t stream)
{
    add(res, res, rhs, count, stream);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief kernel scaling a vector by a scalar: d_res[i] = d_src[i] * scale
/// One thread per element; threads past \b len do nothing.
/// \param d_res scaled vector (device memory)
/// \param d_src source vector (device memory)
/// \param scale scalar multiplier
/// \param len   number of elements
///////////////////////////////////////////////////////////////////////////////
__global__ void scaleVector(float *d_res, const float *d_src, float scale, const int len)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len)
    {
        d_res[idx] = d_src[idx] * scale;
    }
}
///////////////////////////////////////////////////////////////////////////////
/// \brief host wrapper: enqueue d_res = d_src * scale on \b stream.
/// \param d_res  scaled vector (device memory)
/// \param d_src  source vector (device memory)
/// \param scale  scalar multiplier
/// \param len    number of elements
/// \param stream CUDA stream the kernel is launched on
///////////////////////////////////////////////////////////////////////////////
static void ScaleVector(float *d_res, const float *d_src, float scale, const int len, cudaStream_t stream)
{
    const int block_size = 256;
    const int grid_size = iDivUp(len, block_size);
    scaleVector<<<grid_size, block_size, 0, stream>>>(d_res, d_src, scale, len);
}
// Thread-block tile dimensions for the red-black SOR sweep kernel.
const int SOR_TILE_WIDTH = 32;
const int SOR_TILE_HEIGHT = 6;
// Thread-block tile dimensions for the prepare-SOR (system assembly) kernels.
const int PSOR_TILE_WIDTH = 32;
const int PSOR_TILE_HEIGHT = 6;
// Shared-memory tile extents: the PSOR tile plus a 2-pixel apron on each side.
const int PSOR_PITCH = PSOR_TILE_WIDTH + 4;
const int PSOR_HEIGHT = PSOR_TILE_HEIGHT + 4;
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Compute smooth term diffusivity between pixel (i,j)
/// and its left neighbor (i-1,j), i.e. along the x axis, from the updated flow
/// fields (u+du, v+dv) held in shared memory (row stride PSOR_PITCH).
///\param s (out) pointer to memory location for result (diffusivity)
///\param pos (in) position within shared memory array containing \b u
///\param u (in) shared memory array containing \b u
///\param v (in) shared memory array containing \b v
///\param du (in) shared memory array containing \b du
///\param dv (in) shared memory array containing \b dv
///////////////////////////////////////////////////////////////////////////////
__forceinline__ __device__ void diffusivity_along_x(float *s, int pos, const float *u, const float *v, const float *du, const float *dv)
{
    //x derivative between pixels (i,j) and (i-1,j): forward difference of u+du, v+dv
    const int left = pos-1;
    float u_x = u[pos] + du[pos] - u[left] - du[left];
    float v_x = v[pos] + dv[pos] - v[left] - dv[left];
    // neighbor offsets one shared-memory row up/down
    const int up = pos + PSOR_PITCH;
    const int down = pos - PSOR_PITCH;
    const int up_left = up - 1;
    const int down_left = down-1;
    //y derivative between pixels (i,j) and (i-1,j): central difference averaged
    //over the two columns straddling the edge midpoint
    float u_y = 0.25f*(u[up] + du[up] + u[up_left] + du[up_left] - u[down] - du[down] - u[down_left] - du[down_left]);
    float v_y = 0.25f*(v[up] + dv[up] + v[up_left] + dv[up_left] - v[down] - dv[down] - v[down_left] - dv[down_left]);
    // eps2 keeps the denominator strictly positive
    *s = 0.5f / sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2);
}
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Compute smooth term diffusivity between pixel (i,j)
/// and its lower neighbor (i,j-1), i.e. along the y axis, from the updated flow
/// fields (u+du, v+dv) held in shared memory (row stride PSOR_PITCH).
///\param s (out) pointer to memory location for result (diffusivity)
///\param pos (in) position within shared memory array containing \b u
///\param u (in) shared memory array containing \b u
///\param v (in) shared memory array containing \b v
///\param du (in) shared memory array containing \b du
///\param dv (in) shared memory array containing \b dv
///////////////////////////////////////////////////////////////////////////////
__forceinline__ __device__ void diffusivity_along_y(float *s, int pos, const float *u, const float *v, const float *du, const float *dv)
{
    //y derivative between pixels (i,j) and (i,j-1): forward difference of u+du, v+dv
    const int down = pos-PSOR_PITCH;
    float u_y = u[pos] + du[pos] - u[down] - du[down];
    float v_y = v[pos] + dv[pos] - v[down] - dv[down];
    const int right = pos + 1;
    const int left = pos - 1;
    const int down_right = down + 1;
    const int down_left = down - 1;
    //x derivative between pixels (i,j) and (i,j-1): central difference averaged
    //over the two rows straddling the edge midpoint
    float u_x = 0.25f*(u[right] + u[down_right] + du[right] + du[down_right] - u[left] - u[down_left] - du[left] - du[down_left]);
    float v_x = 0.25f*(v[right] + v[down_right] + dv[right] + dv[down_right] - v[left] - v[down_left] - dv[left] - dv[down_left]);
    // eps2 keeps the denominator strictly positive
    *s = 0.5f/sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2);
}
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Load one element of a 2D field into a shared-memory
/// tile, reading through the 1D texture selected by \b tex_id.
/// Out-of-range global coordinates are mirror-reflected back into the image.
///\param smem pointer to shared memory array (row stride PSOR_PITCH)
///\param is shared memory array column
///\param js shared memory array row
///\param i global memory column (may be out of range; reflected below)
///\param j global memory row (may be out of range; reflected below)
///\param w number of columns in global memory array
///\param h number of rows in global memory array
///\param p global memory array pitch in floats
///\tparam tex_id source texture selector: 0 = tex_u, 1 = tex_v, 2 = tex_du, 3 = tex_dv
///////////////////////////////////////////////////////////////////////////////
template<int tex_id>
__forceinline__ __device__ void load_array_element(float *smem, int is, int js, int i, int j, int w, int h, int p)
{
    //position within shared memory array
    const int ijs = js * PSOR_PITCH + is;
    //mirror reflection across borders: i < 0 maps to -i-1, i >= w maps to 2*w-1-i
    //(same for j); valid as long as coordinates are at most one width/height out
    i = max(i, -i-1);
    i = min(i, w-i+w-1);
    j = max(j, -j-1);
    j = min(j, h-j+h-1);
    const int pos = j * p + i;
    // tex_id is a compile-time constant, so the switch resolves at compile time
    switch(tex_id){
        case 0:
            smem[ijs] = tex1Dfetch(tex_u, pos);
            break;
        case 1:
            smem[ijs] = tex1Dfetch(tex_v, pos);
            break;
        case 2:
            smem[ijs] = tex1Dfetch(tex_du, pos);
            break;
        case 3:
            smem[ijs] = tex1Dfetch(tex_dv, pos);
            break;
    }
}
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Cooperatively load a (PSOR_TILE_WIDTH+4) x
/// (PSOR_TILE_HEIGHT+4) region — the block's own tile plus a 2-pixel apron on
/// every side — from global memory (through texture \b tex) into shared memory.
/// Contains block-wide barriers, so it must be reached by every thread of the
/// block in uniform control flow.
///\param smem pointer to target shared memory array (row stride PSOR_PITCH)
///\param ig column number within source
///\param jg row number within source
///\param w number of columns in global memory array
///\param h number of rows in global memory array
///\param p global memory array pitch in floats
///\tparam tex source texture selector: 0 = tex_u, 1 = tex_v, 2 = tex_du, 3 = tex_dv
///////////////////////////////////////////////////////////////////////////////
template<int tex>
__forceinline__ __device__ void load_array(float *smem, int ig, int jg, int w, int h, int p)
{
    // shared-memory coordinates of this thread's own pixel (offset by the 2-pixel apron)
    const int i = threadIdx.x + 2;
    const int j = threadIdx.y + 2;
    load_array_element<tex>(smem, i, j, ig, jg, w, h, p);//load current pixel
    __syncthreads();
    // threads on the tile edges additionally fill the apron rows/columns and corners
    if(threadIdx.y < 2)
    {
        //load bottom shadow elements
        load_array_element<tex>(smem, i, j-2, ig, jg-2, w, h, p);
        if(threadIdx.x < 2)
        {
            //load bottom right shadow elements
            load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j-2, ig+PSOR_TILE_WIDTH, jg-2, w, h, p);
            //load middle right shadow elements
            load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p);
        }
        else if(threadIdx.x >= PSOR_TILE_WIDTH-2)
        {
            //load bottom left shadow elements
            load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j-2, ig-PSOR_TILE_WIDTH, jg-2, w, h, p);
            //load middle left shadow elements
            load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p);
        }
    }
    else if(threadIdx.y >= PSOR_TILE_HEIGHT-2)
    {
        //load upper shadow elements
        load_array_element<tex>(smem, i, j+2, ig, jg+2, w, h, p);
        if(threadIdx.x < 2)
        {
            //load upper right shadow elements
            load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j+2, ig+PSOR_TILE_WIDTH, jg+2, w, h, p);
            //load middle right shadow elements
            load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p);
        }
        else if(threadIdx.x >= PSOR_TILE_WIDTH-2)
        {
            //load upper left shadow elements
            load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j+2, ig-PSOR_TILE_WIDTH, jg+2, w, h, p);
            //load middle left shadow elements
            load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p);
        }
    }
    else
    {
        //load middle shadow elements
        if(threadIdx.x < 2)
        {
            //load middle right shadow elements
            load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p);
        }
        else if(threadIdx.x >= PSOR_TILE_WIDTH-2)
        {
            //load middle left shadow elements
            load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p);
        }
    }
    // ensure the full tile (including apron) is visible to all threads before use
    __syncthreads();
}
///////////////////////////////////////////////////////////////////////////////
/// \brief computes matrix of linearised system for \c du, \c dv
/// Computed values reside in GPU memory. \n
/// Matrix computation is divided into two steps. This kernel performs first step\n
/// - compute smoothness term diffusivity between pixels - psi dash smooth
/// - compute robustness factor in the data term - psi dash data
/// Expects the image/derivative 2D textures and the u/v/du/dv 1D textures to be
/// bound by the caller; launched on a PSOR_TILE_WIDTH x PSOR_TILE_HEIGHT grid.
/// \param diffusivity_x (in/out) diffusivity between pixels along x axis in smoothness term
/// \param diffusivity_y (in/out) diffusivity between pixels along y axis in smoothness term
/// \param denominator_u (in/out) precomputed part of expression for new du value in SOR iteration
/// \param denominator_v (in/out) precomputed part of expression for new dv value in SOR iteration
/// \param numerator_dudv (in/out) precomputed part of expression for new du and dv value in SOR iteration
/// \param numerator_u (in/out) precomputed part of expression for new du value in SOR iteration
/// \param numerator_v (in/out) precomputed part of expression for new dv value in SOR iteration
/// \param w (in) frame width
/// \param h (in) frame height
/// \param s (in) pitch in floats
/// \param alpha (in) alpha in Brox model (flow smoothness)
/// \param gamma (in) gamma in Brox model (edge importance)
///////////////////////////////////////////////////////////////////////////////
__global__ void prepare_sor_stage_1_tex(float *diffusivity_x, float *diffusivity_y,
float *denominator_u, float *denominator_v,
float *numerator_dudv,
float *numerator_u, float *numerator_v,
int w, int h, int s,
float alpha, float gamma)
{
    // tiles of the flow fields and increments, with a 2-pixel apron
    __shared__ float u[PSOR_PITCH * PSOR_HEIGHT];
    __shared__ float v[PSOR_PITCH * PSOR_HEIGHT];
    __shared__ float du[PSOR_PITCH * PSOR_HEIGHT];
    __shared__ float dv[PSOR_PITCH * PSOR_HEIGHT];
    //position within tile
    const int i = threadIdx.x;
    const int j = threadIdx.y;
    //position within smem arrays
    const int ijs = (j+2) * PSOR_PITCH + i + 2;
    //position within global memory
    const int ig = blockIdx.x * blockDim.x + threadIdx.x;
    const int jg = blockIdx.y * blockDim.y + threadIdx.y;
    const int ijg = jg * s + ig;
    //position within texture (pixel center)
    float x = (float)ig + 0.5f;
    float y = (float)jg + 0.5f;
    //load u and v to smem (barriers inside; all threads must participate,
    //which is why the out-of-range check only happens further below)
    load_array<0>(u, ig, jg, w, h, s);
    load_array<1>(v, ig, jg, w, h, s);
    load_array<2>(du, ig, jg, w, h, s);
    load_array<3>(dv, ig, jg, w, h, s);
    //warped position, normalized to [0,1) for the 2D textures
    float wx = (x + u[ijs])/(float)w;
    float wy = (y + v[ijs])/(float)h;
    x /= (float)w;
    y /= (float)h;
    //compute image derivatives at the warped (frame 1) and original (frame 0) positions
    const float Iz = tex2D(tex_I1, wx, wy) - tex2D(tex_I0, x, y);
    const float Ix = tex2D(tex_Ix, wx, wy);
    const float Ixz = Ix - tex2D(tex_Ix0, x, y);
    const float Ixy = tex2D(tex_Ixy, wx, wy);
    const float Ixx = tex2D(tex_Ixx, wx, wy);
    const float Iy = tex2D(tex_Iy, wx, wy);
    const float Iyz = Iy - tex2D(tex_Iy0, x, y);
    const float Iyy = tex2D(tex_Iyy, wx, wy);
    //compute data term: linearised brightness (q0) and gradient (q1, q2) constancy residuals
    float q0, q1, q2;
    q0 = Iz + Ix * du[ijs] + Iy * dv[ijs];
    q1 = Ixz + Ixx * du[ijs] + Ixy * dv[ijs];
    q2 = Iyz + Ixy * du[ijs] + Iyy * dv[ijs];
    float data_term = 0.5f * rsqrtf(q0*q0 + gamma*(q1*q1 + q2*q2) + eps2);
    //scale data term by 1/alpha
    data_term /= alpha;
    //compute smoothness term (diffusivity); threads outside the image only
    //helped fill the shared tiles and exit here without writing
    float sx, sy;
    if(ig >= w || jg >= h) return;
    diffusivity_along_x(&sx, ijs, u, v, du, dv);
    diffusivity_along_y(&sy, ijs, u, v, du, dv);
    //zero diffusivity across the left/top image border
    if(ig == 0) sx = 0.0f;
    if(jg == 0) sy = 0.0f;
    numerator_dudv[ijg] = data_term * (Ix*Iy + gamma * Ixy*(Ixx + Iyy));
    numerator_u[ijg] = data_term * (Ix*Iz + gamma * (Ixx*Ixz + Ixy*Iyz));
    numerator_v[ijg] = data_term * (Iy*Iz + gamma * (Iyy*Iyz + Ixy*Ixz));
    denominator_u[ijg] = data_term * (Ix*Ix + gamma * (Ixy*Ixy + Ixx*Ixx));
    denominator_v[ijg] = data_term * (Iy*Iy + gamma * (Ixy*Ixy + Iyy*Iyy));
    diffusivity_x[ijg] = sx;
    diffusivity_y[ijg] = sy;
}
///////////////////////////////////////////////////////////////////////////////
///\brief computes matrix of linearised system for \c du, \c dv (second step).
/// Adds the sum of the four neighboring diffusivities (read through the
/// tex_diffusivity_x/y textures bound by the caller) to each pixel's
/// denominator and stores the reciprocal, so the SOR sweep can multiply
/// instead of divide.
///\param inv_denominator_u (in/out) denominator for du; replaced by its reciprocal
///\param inv_denominator_v (in/out) denominator for dv; replaced by its reciprocal
///\param w frame width
///\param h frame height
///\param s pitch in floats
///////////////////////////////////////////////////////////////////////////////
__global__ void prepare_sor_stage_2(float *inv_denominator_u, float *inv_denominator_v,
int w, int h, int s)
{
    // diffusivity tiles with one extra column (sx) / row (sy) for the right/up neighbors
    __shared__ float sx[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)];
    __shared__ float sy[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)];
    //position within tile
    const int i = threadIdx.x;
    const int j = threadIdx.y;
    //position within smem arrays
    const int ijs = j*(PSOR_TILE_WIDTH+1) + i;
    //position within global memory
    const int ig = blockIdx.x * blockDim.x + threadIdx.x;
    const int jg = blockIdx.y * blockDim.y + threadIdx.y;
    const int ijg = jg*s + ig;
    int inside = ig < w && jg < h;
    float denom_u;
    float denom_v;
    if(inside)
    {
        denom_u = inv_denominator_u[ijg];
        denom_v = inv_denominator_v[ijg];
    }
    // out-of-image slots get zero diffusivity (Neumann boundary)
    if(inside)
    {
        sx[ijs] = tex1Dfetch(tex_diffusivity_x, ijg);
        sy[ijs] = tex1Dfetch(tex_diffusivity_y, ijg);
    }
    else
    {
        sx[ijs] = 0.0f;
        sy[ijs] = 0.0f;
    }
    // last tile row also loads the diffusivity of the row above (shared row stride is PSOR_TILE_WIDTH+1)
    int up = ijs+PSOR_TILE_WIDTH+1;
    if(j == PSOR_TILE_HEIGHT-1)
    {
        if(jg < h-1 && inside)
        {
            sy[up] = tex1Dfetch(tex_diffusivity_y, ijg + s);
        }
        else
        {
            sy[up] = 0.0f;
        }
    }
    // last tile column also loads the diffusivity of the column to the right
    int right = ijs + 1;
    if(threadIdx.x == PSOR_TILE_WIDTH-1)
    {
        if(ig < w-1 && inside)
        {
            sx[right] = tex1Dfetch(tex_diffusivity_x, ijg + 1);
        }
        else
        {
            sx[right] = 0.0f;
        }
    }
    __syncthreads();
    // left + right + down + up diffusivity around this pixel
    float diffusivity_sum;
    diffusivity_sum = sx[ijs] + sx[ijs+1] + sy[ijs] + sy[ijs+PSOR_TILE_WIDTH+1];
    if(inside)
    {
        denom_u += diffusivity_sum;
        denom_v += diffusivity_sum;
        // store reciprocals so the SOR kernel multiplies instead of divides
        inv_denominator_u[ijg] = 1.0f/denom_u;
        inv_denominator_v[ijg] = 1.0f/denom_v;
    }
}
/////////////////////////////////////////////////////////////////////////////////////////
// Red-Black SOR
/////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////
///\brief One red-black SOR half-sweep over the flow increment fields (du, dv).
/// Pixels whose checkerboard color (i+j)%2 matches \b isBlack are relaxed with
/// factor \b omega; every in-range thread then writes its (possibly unchanged)
/// du, dv to the output arrays. u, v, du, dv and the diffusivities are read
/// through the 1D textures bound by the caller. Out-of-image neighbors reuse
/// the center index and border diffusivities are zeroed (Neumann BC).
///\tparam isBlack checkerboard color updated by this pass (0 or 1)
///\param new_du (out) updated du field
///\param new_dv (out) updated dv field
///\param g_inv_denominator_u (in) reciprocal denominators for du (from prepare_sor_stage_2)
///\param g_inv_denominator_v (in) reciprocal denominators for dv
///\param g_numerator_u (in) data-term numerator for du
///\param g_numerator_v (in) data-term numerator for dv
///\param g_numerator_dudv (in) du/dv coupling term
///\param omega relaxation factor
///\param width frame width
///\param height frame height
///\param stride row stride in floats
///////////////////////////////////////////////////////////////////////////////
template<int isBlack> __global__ void sor_pass(float *new_du,
float *new_dv,
const float *g_inv_denominator_u,
const float *g_inv_denominator_v,
const float *g_numerator_u,
const float *g_numerator_v,
const float *g_numerator_dudv,
float omega,
int width,
int height,
int stride)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i >= width || j >= height)
        return;
    const int pos = j * stride + i;
    // neighbor indices; clamped to the center pixel at the image borders
    const int pos_r = i < width - 1 ? pos + 1 : pos;
    const int pos_u = j < height - 1 ? pos + stride : pos;
    const int pos_d = j > 0 ? pos - stride : pos;
    const int pos_l = i > 0 ? pos - 1 : pos;
    //load smooth term
    float s_up, s_left, s_right, s_down;
    s_left = tex1Dfetch(tex_diffusivity_x, pos);
    s_down = tex1Dfetch(tex_diffusivity_y, pos);
    if(i < width-1)
        s_right = tex1Dfetch(tex_diffusivity_x, pos_r);
    else
        s_right = 0.0f; //Neumann BC
    if(j < height-1)
        s_up = tex1Dfetch(tex_diffusivity_y, pos_u);
    else
        s_up = 0.0f; //Neumann BC
    //load u, v and du, dv
    float u_up, u_left, u_right, u_down, u;
    float v_up, v_left, v_right, v_down, v;
    float du_up, du_left, du_right, du_down, du;
    float dv_up, dv_left, dv_right, dv_down, dv;
    u_left = tex1Dfetch(tex_u, pos_l);
    u_right = tex1Dfetch(tex_u, pos_r);
    u_down = tex1Dfetch(tex_u, pos_d);
    u_up = tex1Dfetch(tex_u, pos_u);
    u = tex1Dfetch(tex_u, pos);
    v_left = tex1Dfetch(tex_v, pos_l);
    v_right = tex1Dfetch(tex_v, pos_r);
    v_down = tex1Dfetch(tex_v, pos_d);
    v = tex1Dfetch(tex_v, pos);
    v_up = tex1Dfetch(tex_v, pos_u);
    du = tex1Dfetch(tex_du, pos);
    du_left = tex1Dfetch(tex_du, pos_l);
    du_right = tex1Dfetch(tex_du, pos_r);
    du_down = tex1Dfetch(tex_du, pos_d);
    du_up = tex1Dfetch(tex_du, pos_u);
    dv = tex1Dfetch(tex_dv, pos);
    dv_left = tex1Dfetch(tex_dv, pos_l);
    dv_right = tex1Dfetch(tex_dv, pos_r);
    dv_down = tex1Dfetch(tex_dv, pos_d);
    dv_up = tex1Dfetch(tex_dv, pos_u);
    float numerator_dudv = g_numerator_dudv[pos];
    // relax only this pass's checkerboard color; the other color is copied through
    if((i+j)%2 == isBlack)
    {
        // update du: Gauss-Seidel step blended with the old value by omega
        float numerator_u = (s_left*(u_left + du_left) + s_up*(u_up + du_up) + s_right*(u_right + du_right) + s_down*(u_down + du_down) -
                             u * (s_left + s_right + s_up + s_down) - g_numerator_u[pos] - numerator_dudv*dv);
        du = (1.0f - omega) * du + omega * g_inv_denominator_u[pos] * numerator_u;
        // update dv, using the du value just computed (coupling term)
        float numerator_v = (s_left*(v_left + dv_left) + s_up*(v_up + dv_up) + s_right*(v_right + dv_right) + s_down*(v_down + dv_down) -
                             v * (s_left + s_right + s_up + s_down) - g_numerator_v[pos] - numerator_dudv*du);
        dv = (1.0f - omega) * dv + omega * g_inv_denominator_v[pos] * numerator_v;
    }
    new_du[pos] = du;
    new_dv[pos] = dv;
}
///////////////////////////////////////////////////////////////////////////////
// utility functions
///////////////////////////////////////////////////////////////////////////////
// Configure a 1D texture reference for raw buffer fetches:
// element indexing, no interpolation, clamped addressing.
void initTexture1D(texture<float, 1, cudaReadModeElementType> &tex)
{
    tex.normalized = false;                    // index by element, not [0,1)
    tex.filterMode = cudaFilterModePoint;      // exact values, no filtering
    tex.addressMode[0] = cudaAddressModeClamp; // clamp out-of-range indices
}
// Configure a 2D texture reference for image sampling:
// normalized coordinates, bilinear filtering, mirrored borders.
void initTexture2D(texture<float, 2, cudaReadModeElementType> &tex)
{
    tex.normalized = true;                      // coordinates in [0,1)
    tex.filterMode = cudaFilterModeLinear;      // bilinear interpolation
    tex.addressMode[0] = cudaAddressModeMirror; // mirror across x border
    tex.addressMode[1] = cudaAddressModeMirror; // mirror across y border
}
// One-time setup of the sampling modes of every texture reference used by the solver.
void InitTextures()
{
    // 2D image textures: normalized coordinates, bilinear filtering, mirrored borders.
    initTexture2D(tex_I0);
    initTexture2D(tex_I1);
    initTexture2D(tex_fine);   // for downsampling
    initTexture2D(tex_coarse); // for prolongation
    initTexture2D(tex_Ix);
    initTexture2D(tex_Ixx);
    initTexture2D(tex_Ix0);
    initTexture2D(tex_Iy);
    initTexture2D(tex_Iyy);
    initTexture2D(tex_Iy0);
    initTexture2D(tex_Ixy);

    // 1D linear-memory textures: element indexing, point sampling, clamped.
    initTexture1D(tex_u);
    initTexture1D(tex_v);
    initTexture1D(tex_du);
    initTexture1D(tex_dv);
    initTexture1D(tex_diffusivity_x);
    initTexture1D(tex_diffusivity_y);
    initTexture1D(tex_inv_denominator_u);
    initTexture1D(tex_inv_denominator_v);
    initTexture1D(tex_numerator_dudv);
    initTexture1D(tex_numerator_u);
    initTexture1D(tex_numerator_v);
}
namespace
{
    /// Image pyramid used by the coarse-to-fine solver.
    /// Owns the per-level images through raw pointers and deletes them on
    /// destruction, so the type must not be copied (see below).
    struct ImagePyramid
    {
        std::vector<FloatVector*> img0; // frame 0 at each level (owned)
        std::vector<FloatVector*> img1; // frame 1 at each level (owned)
        std::vector<Ncv32u> w;          // width of each level
        std::vector<Ncv32u> h;          // height of each level

        /// Reserve storage for up to \p outer_iterations pyramid levels.
        explicit ImagePyramid(int outer_iterations)
        {
            img0.reserve(outer_iterations);
            img1.reserve(outer_iterations);
            w.reserve(outer_iterations);
            h.reserve(outer_iterations);
        }

        /// Release all owned level images, coarsest-to-finest (reverse of
        /// the order in which they were pushed).
        ~ImagePyramid()
        {
            w.clear();
            h.clear();
            for (int i = static_cast<int>(img0.size()) - 1; i >= 0; --i)
            {
                delete img1[i];
                delete img0[i];
            }
            img0.clear();
            img1.clear();
        }

    private:
        // Non-copyable (C++03 style): copying would double-delete the owned
        // images in the two destructors. Declared but intentionally undefined.
        ImagePyramid(const ImagePyramid&);
        ImagePyramid& operator=(const ImagePyramid&);
    };
}
/////////////////////////////////////////////////////////////////////////////////////////
// MAIN FUNCTION
/////////////////////////////////////////////////////////////////////////////////////////
NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
INCVMemAllocator &gpu_mem_allocator,
const NCVMatrix<Ncv32f> &frame0,
const NCVMatrix<Ncv32f> &frame1,
NCVMatrix<Ncv32f> &uOut,
NCVMatrix<Ncv32f> &vOut,
cudaStream_t stream)
{
ncvAssertPrintReturn(desc.alpha > 0.0f , "Invalid alpha" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.gamma >= 0.0f , "Invalid gamma" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.number_of_inner_iterations > 0 , "Invalid number of inner iterations" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.number_of_outer_iterations > 0 , "Invalid number of outer iterations" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.number_of_solver_iterations > 0, "Invalid number of solver iterations", NCV_INCONSISTENT_INPUT);
const Ncv32u kSourceWidth = frame0.width();
const Ncv32u kSourceHeight = frame0.height();
ncvAssertPrintReturn(frame1.width() == kSourceWidth && frame1.height() == kSourceHeight, "Frame dims do not match", NCV_INCONSISTENT_INPUT);
ncvAssertReturn(uOut.width() == kSourceWidth && vOut.width() == kSourceWidth &&
uOut.height() == kSourceHeight && vOut.height() == kSourceHeight, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(gpu_mem_allocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
bool kSkipProcessing = gpu_mem_allocator.isCounting();
int cuda_device;
ncvAssertCUDAReturn(cudaGetDevice(&cuda_device), NCV_CUDA_ERROR);
cudaDeviceProp device_props;
ncvAssertCUDAReturn(cudaGetDeviceProperties(&device_props, cuda_device), NCV_CUDA_ERROR);
Ncv32u alignmentValue = gpu_mem_allocator.alignment ();
const Ncv32u kStrideAlignmentFloat = alignmentValue / sizeof(float);
const Ncv32u kSourcePitch = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float);
const Ncv32f scale_factor = desc.scale_factor;
const Ncv32f alpha = desc.alpha;
const Ncv32f gamma = desc.gamma;
const Ncv32u kSizeInPixelsAligned = alignUp(kSourceWidth, kStrideAlignmentFloat)*kSourceHeight;
#if defined SAFE_VECTOR_DECL
#undef SAFE_VECTOR_DECL
#endif
#define SAFE_VECTOR_DECL(name, allocator, size) \
FloatVector name((allocator), (size)); \
ncvAssertReturn(name.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
// matrix elements
SAFE_VECTOR_DECL(diffusivity_x, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(diffusivity_y, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(denom_u, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(denom_v, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(num_dudv, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(num_u, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(num_v, gpu_mem_allocator, kSizeInPixelsAligned);
// flow components
SAFE_VECTOR_DECL(u, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(v, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(u_new, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(v_new, gpu_mem_allocator, kSizeInPixelsAligned);
// flow increments
SAFE_VECTOR_DECL(du, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(dv, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(du_new, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(dv_new, gpu_mem_allocator, kSizeInPixelsAligned);
// temporary storage
SAFE_VECTOR_DECL(device_buffer, gpu_mem_allocator,
alignUp(kSourceWidth, kStrideAlignmentFloat) * alignUp(kSourceHeight, kStrideAlignmentFloat));
// image derivatives
SAFE_VECTOR_DECL(Ix, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Ixx, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Ix0, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Iy, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Iyy, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Iy0, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Ixy, gpu_mem_allocator, kSizeInPixelsAligned);
// spatial derivative filter size
const int kDFilterSize = 5;
SAFE_VECTOR_DECL(derivativeFilter, gpu_mem_allocator, kDFilterSize);
if (!kSkipProcessing)
{
const float derivativeFilterHost[kDFilterSize] = {1.0f, -8.0f, 0.0f, 8.0f, -1.0f};
ncvAssertCUDAReturn(cudaMemcpy(derivativeFilter.ptr(), derivativeFilterHost, sizeof(float) * kDFilterSize,
cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
InitTextures();
}
//prepare image pyramid
ImagePyramid pyr(desc.number_of_outer_iterations);
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
float scale = 1.0f;
//cuda arrays for frames
std::auto_ptr<FloatVector> pI0(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
ncvAssertReturn(pI0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
std::auto_ptr<FloatVector> pI1(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
ncvAssertReturn(pI1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
if (!kSkipProcessing)
{
//copy frame data to device
size_t dst_width_in_bytes = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float);
size_t src_width_in_bytes = kSourceWidth * sizeof(float);
size_t src_pitch_in_bytes = frame0.pitch();
ncvAssertCUDAReturn( cudaMemcpy2DAsync(pI0->ptr(), dst_width_in_bytes, frame0.ptr(),
src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, cudaMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR );
ncvAssertCUDAReturn( cudaMemcpy2DAsync(pI1->ptr(), dst_width_in_bytes, frame1.ptr(),
src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, cudaMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR );
}
FloatVector* I0 = pI0.release();
FloatVector* I1 = pI1.release();
//prepare pyramid
pyr.img0.push_back(I0);
pyr.img1.push_back(I1);
pyr.w.push_back(kSourceWidth);
pyr.h.push_back(kSourceHeight);
scale *= scale_factor;
Ncv32u prev_level_width = kSourceWidth;
Ncv32u prev_level_height = kSourceHeight;
while((prev_level_width > 15) && (prev_level_height > 15) && (static_cast<Ncv32u>(pyr.img0.size()) < desc.number_of_outer_iterations))
{
//current resolution
Ncv32u level_width = static_cast<Ncv32u>(ceilf(kSourceWidth * scale));
Ncv32u level_height = static_cast<Ncv32u>(ceilf(kSourceHeight * scale));
Ncv32u level_width_aligned = alignUp(level_width, kStrideAlignmentFloat);
Ncv32u buffer_size = alignUp(level_width, kStrideAlignmentFloat) * level_height; // buffer size in floats
Ncv32u prev_level_pitch = alignUp(prev_level_width, kStrideAlignmentFloat) * sizeof(float);
std::auto_ptr<FloatVector> level_frame0(new FloatVector(gpu_mem_allocator, buffer_size));
ncvAssertReturn(level_frame0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
std::auto_ptr<FloatVector> level_frame1(new FloatVector(gpu_mem_allocator, buffer_size));
ncvAssertReturn(level_frame1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
if (!kSkipProcessing)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR);
NcvSize32u srcSize (prev_level_width, prev_level_height);
NcvSize32u dstSize (level_width, level_height);
NcvRect32u srcROI (0, 0, prev_level_width, prev_level_height);
NcvRect32u dstROI (0, 0, level_width, level_height);
// frame 0
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I0->ptr(), srcSize, prev_level_pitch, srcROI,
level_frame0->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) );
// frame 1
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I1->ptr(), srcSize, prev_level_pitch, srcROI,
level_frame1->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) );
}
I0 = level_frame0.release();
I1 = level_frame1.release();
//store pointers
pyr.img0.push_back(I0);
pyr.img1.push_back(I1);
pyr.w.push_back(level_width);
pyr.h.push_back(level_height);
scale *= scale_factor;
prev_level_width = level_width;
prev_level_height = level_height;
}
if (!kSkipProcessing)
{
//initial values for flow is 0
ncvAssertCUDAReturn(cudaMemsetAsync(u.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemsetAsync(v.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR);
//select images with lowest resolution
size_t pitch = alignUp(pyr.w.back(), kStrideAlignmentFloat) * sizeof(float);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I0, pyr.img0.back()->ptr(), channel_desc, pyr.w.back(), pyr.h.back(), pitch), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I1, pyr.img1.back()->ptr(), channel_desc, pyr.w.back(), pyr.h.back(), pitch), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR);
FloatVector* ptrU = &u;
FloatVector* ptrV = &v;
FloatVector* ptrUNew = &u_new;
FloatVector* ptrVNew = &v_new;
std::vector<FloatVector*>::const_reverse_iterator img0Iter = pyr.img0.rbegin();
std::vector<FloatVector*>::const_reverse_iterator img1Iter = pyr.img1.rbegin();
//outer loop
//warping fixed point iteration
while(!pyr.w.empty())
{
//current grid dimensions
const Ncv32u kLevelWidth = pyr.w.back();
const Ncv32u kLevelHeight = pyr.h.back();
const Ncv32u kLevelStride = alignUp(kLevelWidth, kStrideAlignmentFloat);
//size of current image in bytes
const int kLevelSizeInBytes = kLevelStride * kLevelHeight * sizeof(float);
//number of points at current resolution
const int kLevelSizeInPixels = kLevelStride * kLevelHeight;
//initial guess for du and dv
ncvAssertCUDAReturn(cudaMemsetAsync(du.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemsetAsync(dv.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR);
//texture format descriptor
cudaChannelFormatDesc ch_desc = cudaCreateChannelDesc<float>();
I0 = *img0Iter;
I1 = *img1Iter;
++img0Iter;
++img1Iter;
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I0, I0->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I1, I1->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
//compute derivatives
dim3 dBlocks(iDivUp(kLevelWidth, 32), iDivUp(kLevelHeight, 6));
dim3 dThreads(32, 6);
const int kPitchTex = kLevelStride * sizeof(float);
NcvSize32u srcSize(kLevelWidth, kLevelHeight);
Ncv32u nSrcStep = kLevelStride * sizeof(float);
NcvRect32u oROI(0, 0, kLevelWidth, kLevelHeight);
// Ix0
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Ix0.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Iy0
ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Iy0.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Ix
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Ix.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Iy
ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Iy.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Ixx
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Ix.ptr(), srcSize, nSrcStep, Ixx.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Iyy
ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Iyy.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Ixy
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Ixy.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix, Ix.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixx, Ixx.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix0, Ix0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy, Iy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iyy, Iyy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy0, Iy0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixy, Ixy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
// flow
ncvAssertCUDAReturn(cudaBindTexture(0, tex_u, ptrU->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_v, ptrV->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
// flow increments
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
dim3 psor_blocks(iDivUp(kLevelWidth, PSOR_TILE_WIDTH), iDivUp(kLevelHeight, PSOR_TILE_HEIGHT));
dim3 psor_threads(PSOR_TILE_WIDTH, PSOR_TILE_HEIGHT);
dim3 sor_blocks(iDivUp(kLevelWidth, SOR_TILE_WIDTH), iDivUp(kLevelHeight, SOR_TILE_HEIGHT));
dim3 sor_threads(SOR_TILE_WIDTH, SOR_TILE_HEIGHT);
// inner loop
// lagged nonlinearity fixed point iteration
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR);
for (Ncv32u current_inner_iteration = 0; current_inner_iteration < desc.number_of_inner_iterations; ++current_inner_iteration)
{
//compute coefficients
prepare_sor_stage_1_tex<<<psor_blocks, psor_threads, 0, stream>>>
(diffusivity_x.ptr(),
diffusivity_y.ptr(),
denom_u.ptr(),
denom_v.ptr(),
num_dudv.ptr(),
num_u.ptr(),
num_v.ptr(),
kLevelWidth,
kLevelHeight,
kLevelStride,
alpha,
gamma);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
prepare_sor_stage_2<<<psor_blocks, psor_threads, 0, stream>>>(denom_u.ptr(), denom_v.ptr(), kLevelWidth, kLevelHeight, kLevelStride);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
// linear system coefficients
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_u, denom_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_v, denom_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
//solve linear system
for (Ncv32u solver_iteration = 0; solver_iteration < desc.number_of_solver_iterations; ++solver_iteration)
{
float omega = 1.99f;
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
sor_pass<0><<<sor_blocks, sor_threads, 0, stream>>>
(du_new.ptr(),
dv_new.ptr(),
denom_u.ptr(),
denom_v.ptr(),
num_u.ptr(),
num_v.ptr(),
num_dudv.ptr(),
omega,
kLevelWidth,
kLevelHeight,
kLevelStride);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
sor_pass<1><<<sor_blocks, sor_threads, 0, stream>>>
(du.ptr(),
dv.ptr(),
denom_u.ptr(),
denom_v.ptr(),
num_u.ptr(),
num_v.ptr(),
num_dudv.ptr(),
omega,
kLevelWidth,
kLevelHeight,
kLevelStride);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
}//end of solver loop
}// end of inner loop
//update u and v
add(ptrU->ptr(), du.ptr(), kLevelSizeInPixels, stream);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
add(ptrV->ptr(), dv.ptr(), kLevelSizeInPixels, stream);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
//prolongate using texture
pyr.w.pop_back();
pyr.h.pop_back();
if (!pyr.w.empty())
{
//compute new image size
Ncv32u nw = pyr.w.back();
Ncv32u nh = pyr.h.back();
Ncv32u ns = alignUp(nw, kStrideAlignmentFloat);
dim3 p_blocks(iDivUp(nw, 32), iDivUp(nh, 8));
dim3 p_threads(32, 8);
NcvSize32u inner_srcSize (kLevelWidth, kLevelHeight);
NcvSize32u dstSize (nw, nh);
NcvRect32u srcROI (0, 0, kLevelWidth, kLevelHeight);
NcvRect32u dstROI (0, 0, nw, nh);
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrU->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI,
ptrUNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) );
ScaleVector(ptrUNew->ptr(), ptrUNew->ptr(), 1.0f/scale_factor, ns * nh, stream);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrV->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI,
ptrVNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) );
ScaleVector(ptrVNew->ptr(), ptrVNew->ptr(), 1.0f/scale_factor, ns * nh, stream);
ncvAssertCUDALastErrorReturn((int)NCV_CUDA_ERROR);
cv::gpu::device::swap<FloatVector*>(ptrU, ptrUNew);
cv::gpu::device::swap<FloatVector*>(ptrV, ptrVNew);
}
scale /= scale_factor;
}
// end of warping iterations
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), (int)NCV_CUDA_ERROR);
ncvAssertCUDAReturn( cudaMemcpy2DAsync
(uOut.ptr(), uOut.pitch(), ptrU->ptr(),
kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR );
ncvAssertCUDAReturn( cudaMemcpy2DAsync
(vOut.ptr(), vOut.pitch(), ptrV->ptr(),
kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR );
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), (int)NCV_CUDA_ERROR);
}
return NCV_SUCCESS;
}
#endif /* CUDA_DISABLER */ |
b0f0428fe55073aceb9fc3f990d9342b74c709c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "PairwiseDistance.hpp"
#include "PairwiseDistanceLocal.hpp"
// Returns the address of element (r, c) in a row-major matrix whose rows are
// `mat.pitch` floats apart (pitch is expressed in elements, not bytes).
static __device__ float* getPtr(const DeviceMatrix mat, unsigned int r,
                                unsigned int c)
{
    float* row_start = mat.data + r * mat.pitch;
    return row_start + c;
}
/**
* @note This version does not coalesing memory reads *at all*, but
* should be a simple first pass.
*/
/* Unused - leaving here for reference
static __global__ void pairwiseDistanceKernel(DeviceMatrix a, DeviceMatrix b,
DeviceMatrix out)
{
int ry = blockIdx.y * blockDim.y + threadIdx.y;
int cx = blockIdx.x * blockDim.x + threadIdx.x;
if ((cx < out.width) & (ry < out.height)) {
float dst = 0;
for (int i=0; i < a.width; i++) {
float diff = *getPtr(a, ry, i) - *getPtr(b, cx, i);
dst += diff * diff;
}
*getPtr(out, ry, cx) = dst;
}
}
*/
static const unsigned int BLOCK_SIZE = 16;
/**
* @note This kernel is based on the blocked matrix multiply. We
* expect to be caleld with blockDim(BLOCK_SIZE, BLOCK_SIZE) and a
* sufficiently large grid to cover all othe output values.
*/
/**
 * Blocked pairwise "distance" kernel, modeled on the blocked matrix multiply.
 * Expects blockDim == (BLOCK_SIZE, BLOCK_SIZE) and a grid large enough to
 * cover `out` (out.height == a.height rows, out.width == b.height columns).
 *
 * Fix: the inner reduction previously accumulated the hard-coded products of
 * lanes 0..8 on *every* iteration of k, so the same 9-term partial product
 * was added `end` times per tile and columns >= 9 were ignored.  It now adds
 * a_cache[y][k] * b_cache[x][k] once per valid k, i.e. a full-width dot
 * product.  Only the (ABS)DOTPRODUCT semantics are implemented; the other
 * metric branches are kept below as reference.
 */
__global__ void pairwiseDistanceKernelGeneric(DeviceMatrix a, DeviceMatrix b,
                                              DeviceMatrix out, const int type)
{
    const int out_ry = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    const int out_cx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // Index used for reading b.  We use the fact that our thread
    // blocks are square here.
    const int b_ry = blockIdx.x * BLOCK_SIZE + threadIdx.y;
    __shared__ float a_cache[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float b_cache[BLOCK_SIZE][BLOCK_SIZE];
    float dst = 0;
    for (unsigned int i=0; i < a.width; i+=BLOCK_SIZE) {
        int read_cx = i + threadIdx.x;
        if (read_cx < a.width) {
            if (out_ry < a.height) {
                a_cache[threadIdx.y][threadIdx.x] =
                    *getPtr(a, out_ry, read_cx);
            }
            if (b_ry < b.height) {
                b_cache[threadIdx.y][threadIdx.x] =
                    *getPtr(b, b_ry, read_cx);
            }
        }
        // Make both tiles visible to the whole block before reducing.
        __syncthreads();
        int end = min(BLOCK_SIZE, (unsigned int)(a.width - i));
        for (int k=0; k < end; k++) {
            // (ABS)DOTPRODUCT accumulation.
            dst += a_cache[threadIdx.y][k] * b_cache[threadIdx.x][k];
            // Reference implementations of the other metrics:
            // if (type == EUCLIDEAN){
            //     float diff = a_cache[threadIdx.y][k] - b_cache[threadIdx.x][k];
            //     dst += diff * diff;
            // }
            // else if (type == CHISQUARED){
            //     float diff, sum;
            //     diff = a_cache[threadIdx.y][k] - b_cache[threadIdx.x][k];
            //     sum  = a_cache[threadIdx.y][k] + b_cache[threadIdx.x][k];
            //     dst += diff * diff / sum;
            // }
            // else if (type == CITYBLOCK){
            //     dst += fabs(a_cache[threadIdx.y][k] - b_cache[threadIdx.x][k]);
            // }
        }
        // Do not refill the tiles while other threads may still be reading them.
        __syncthreads();
    }
    if ((out_cx < out.width) & (out_ry < out.height)) {
        if (type == ABSDOTPRODUCT){
            *getPtr(out, out_ry, out_cx) = abs(dst);
        }
        else {
            *getPtr(out, out_ry, out_cx) = dst;
        }
    }
}
// Host-side launcher: one thread per output element, square tiles matching
// the kernel's shared-memory caches, then a blocking sync so results are
// ready when this function returns.
void pwdist_generic( const DeviceMatrix* features_train,
                     const DeviceMatrix* features_test,
                     DeviceMatrix* output,
                     int type) {
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    // Ceil-divide the output rows (train samples) and columns (test samples).
    int rows = (features_train->height-1) / block.y + 1;
    int cols = (features_test->height-1) / block.x + 1;
    dim3 grid(cols, rows);
    hipLaunchKernelGGL(( pairwiseDistanceKernelGeneric), dim3(grid), dim3(block), 0, 0,
                       *features_train, *features_test, *output, type);
    hipDeviceSynchronize();
}
/**
* @note This is a simple first pass that does nothing to manage
* memory access
*/
/**
 * Row-wise argmin: each thread scans one full row of `matrix` and writes the
 * index of the smallest element to output(row, 0).
 */
__global__ void argminKernel(DeviceMatrix matrix, DeviceMatrix output)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= matrix.height) {
        return;
    }
    int bestIdx = 0;
    float best = *getPtr(matrix, row, 0);
    for (int col = 1; col < matrix.width; col++) {
        const float val = *getPtr(matrix, row, col);
        if (val < best) {
            best = val;
            bestIdx = col;
        }
    }
    *getPtr(output, row, 0) = bestIdx;
}
// Launches argminKernel: one thread per row, 256 rows per block.
void argmin_cuda_local(const DeviceMatrix* matrix, DeviceMatrix* output)
{
    dim3 block(1, 256);
    dim3 grid(1, (matrix->height-1) / block.y + 1);
    hipLaunchKernelGGL(( argminKernel), dim3(grid), dim3(block), 0, 0, *matrix, *output);
}
/**
 * Row-wise argmax: each thread scans one full row of `matrix` and writes the
 * index of the largest element to output(row, 0).
 */
__global__ void argmaxKernel(DeviceMatrix matrix, DeviceMatrix output)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= matrix.height) {
        return;
    }
    int bestIdx = 0;
    float best = *getPtr(matrix, row, 0);
    for (int col = 1; col < matrix.width; col++) {
        const float val = *getPtr(matrix, row, col);
        if (val > best) {
            best = val;
            bestIdx = col;
        }
    }
    *getPtr(output, row, 0) = bestIdx;
}
// Launches argmaxKernel: one thread per row, 256 rows per block.
void argmax_cuda_local(const DeviceMatrix* matrix, DeviceMatrix* output)
{
    dim3 block(1, 256);
    dim3 grid(1, (matrix->height-1) / block.y + 1);
    hipLaunchKernelGGL(( argmaxKernel), dim3(grid), dim3(block), 0, 0, *matrix, *output);
}
/**
 * Row-wise minimum: each thread scans one full row of `matrix` and writes the
 * smallest value to output(row, 0).
 */
__global__ void minKernel(DeviceMatrix matrix, DeviceMatrix output)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= matrix.height) {
        return;
    }
    float best = *getPtr(matrix, row, 0);
    for (int col = 1; col < matrix.width; col++) {
        const float val = *getPtr(matrix, row, col);
        if (val < best) {
            best = val;
        }
    }
    *getPtr(output, row, 0) = best;
}
// Launches minKernel: one thread per row, 256 rows per block.
void min_cuda_local(const DeviceMatrix* matrix, DeviceMatrix* output)
{
    dim3 block(1, 256);
    dim3 grid(1, (matrix->height-1) / block.y + 1);
    hipLaunchKernelGGL(( minKernel), dim3(grid), dim3(block), 0, 0, *matrix, *output);
}
/**
 * Row-wise maximum: each thread scans one full row of `matrix` and writes the
 * largest value to output(row, 0).
 */
__global__ void maxKernel(DeviceMatrix matrix, DeviceMatrix output)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= matrix.height) {
        return;
    }
    float best = *getPtr(matrix, row, 0);
    for (int col = 1; col < matrix.width; col++) {
        const float val = *getPtr(matrix, row, col);
        if (val > best) {
            best = val;
        }
    }
    *getPtr(output, row, 0) = best;
}
// Launches maxKernel: one thread per row, 256 rows per block.
void max_cuda_local(const DeviceMatrix* matrix, DeviceMatrix* output)
{
    dim3 block(1, 256);
    dim3 grid(1, (matrix->height-1) / block.y + 1);
    hipLaunchKernelGGL(( maxKernel), dim3(grid), dim3(block), 0, 0, *matrix, *output);
}
| b0f0428fe55073aceb9fc3f990d9342b74c709c5.cu | #include "PairwiseDistance.hpp"
#include "PairwiseDistanceLocal.hpp"
// Returns the address of element (r, c) in a row-major matrix whose rows are
// `mat.pitch` floats apart (pitch is expressed in elements, not bytes).
static __device__ float* getPtr(const DeviceMatrix mat, unsigned int r,
                                unsigned int c)
{
    float* row_start = mat.data + r * mat.pitch;
    return row_start + c;
}
/**
* @note This version does not coalesing memory reads *at all*, but
* should be a simple first pass.
*/
/* Unused - leaving here for reference
static __global__ void pairwiseDistanceKernel(DeviceMatrix a, DeviceMatrix b,
DeviceMatrix out)
{
int ry = blockIdx.y * blockDim.y + threadIdx.y;
int cx = blockIdx.x * blockDim.x + threadIdx.x;
if ((cx < out.width) & (ry < out.height)) {
float dst = 0;
for (int i=0; i < a.width; i++) {
float diff = *getPtr(a, ry, i) - *getPtr(b, cx, i);
dst += diff * diff;
}
*getPtr(out, ry, cx) = dst;
}
}
*/
static const unsigned int BLOCK_SIZE = 16;
/**
* @note This kernel is based on the blocked matrix multiply. We
* expect to be caleld with blockDim(BLOCK_SIZE, BLOCK_SIZE) and a
* sufficiently large grid to cover all othe output values.
*/
/**
 * Blocked pairwise "distance" kernel, modeled on the blocked matrix multiply.
 * Expects blockDim == (BLOCK_SIZE, BLOCK_SIZE) and a grid large enough to
 * cover `out` (out.height == a.height rows, out.width == b.height columns).
 *
 * Fix: the inner reduction previously accumulated the hard-coded products of
 * lanes 0..8 on *every* iteration of k, so the same 9-term partial product
 * was added `end` times per tile and columns >= 9 were ignored.  It now adds
 * a_cache[y][k] * b_cache[x][k] once per valid k, i.e. a full-width dot
 * product.  Only the (ABS)DOTPRODUCT semantics are implemented; the other
 * metric branches are kept below as reference.
 */
__global__ void pairwiseDistanceKernelGeneric(DeviceMatrix a, DeviceMatrix b,
                                              DeviceMatrix out, const int type)
{
    const int out_ry = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    const int out_cx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // Index used for reading b.  We use the fact that our thread
    // blocks are square here.
    const int b_ry = blockIdx.x * BLOCK_SIZE + threadIdx.y;
    __shared__ float a_cache[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float b_cache[BLOCK_SIZE][BLOCK_SIZE];
    float dst = 0;
    for (unsigned int i=0; i < a.width; i+=BLOCK_SIZE) {
        int read_cx = i + threadIdx.x;
        if (read_cx < a.width) {
            if (out_ry < a.height) {
                a_cache[threadIdx.y][threadIdx.x] =
                    *getPtr(a, out_ry, read_cx);
            }
            if (b_ry < b.height) {
                b_cache[threadIdx.y][threadIdx.x] =
                    *getPtr(b, b_ry, read_cx);
            }
        }
        // Make both tiles visible to the whole block before reducing.
        __syncthreads();
        int end = min(BLOCK_SIZE, (unsigned int)(a.width - i));
        for (int k=0; k < end; k++) {
            // (ABS)DOTPRODUCT accumulation.
            dst += a_cache[threadIdx.y][k] * b_cache[threadIdx.x][k];
            // Reference implementations of the other metrics:
            // if (type == EUCLIDEAN){
            //     float diff = a_cache[threadIdx.y][k] - b_cache[threadIdx.x][k];
            //     dst += diff * diff;
            // }
            // else if (type == CHISQUARED){
            //     float diff, sum;
            //     diff = a_cache[threadIdx.y][k] - b_cache[threadIdx.x][k];
            //     sum  = a_cache[threadIdx.y][k] + b_cache[threadIdx.x][k];
            //     dst += diff * diff / sum;
            // }
            // else if (type == CITYBLOCK){
            //     dst += fabs(a_cache[threadIdx.y][k] - b_cache[threadIdx.x][k]);
            // }
        }
        // Do not refill the tiles while other threads may still be reading them.
        __syncthreads();
    }
    if ((out_cx < out.width) & (out_ry < out.height)) {
        if (type == ABSDOTPRODUCT){
            *getPtr(out, out_ry, out_cx) = abs(dst);
        }
        else {
            *getPtr(out, out_ry, out_cx) = dst;
        }
    }
}
/**
 * Host-side launcher for pairwiseDistanceKernelGeneric: one thread per output
 * element, square tiles matching the kernel's shared-memory caches, then a
 * blocking synchronize so results are ready when this function returns.
 */
void pwdist_generic( const DeviceMatrix* features_train,
                     const DeviceMatrix* features_test,
                     DeviceMatrix* output,
                     int type) {
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Ceil-divide: output rows cover train samples, columns cover test samples.
    int grid_ry = (features_train->height-1) / dimBlock.y + 1;
    int grid_cx = (features_test->height-1) / dimBlock.x + 1;
    dim3 dimGrid(grid_cx, grid_ry);
    pairwiseDistanceKernelGeneric<<<dimGrid, dimBlock>>>(*features_train,
                                                         *features_test,
                                                         *output,
                                                         type);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent and blocks until the kernel has finished.
    cudaDeviceSynchronize();
}
/**
* @note This is a simple first pass that does nothing to manage
* memory access
*/
/**
 * Row-wise argmin: each thread scans one full row of `matrix` and writes the
 * index of the smallest element to output(row, 0).
 */
__global__ void argminKernel(DeviceMatrix matrix, DeviceMatrix output)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= matrix.height) {
        return;
    }
    int bestIdx = 0;
    float best = *getPtr(matrix, row, 0);
    for (int col = 1; col < matrix.width; col++) {
        const float val = *getPtr(matrix, row, col);
        if (val < best) {
            best = val;
            bestIdx = col;
        }
    }
    *getPtr(output, row, 0) = bestIdx;
}
// Launches argminKernel: one thread per row, 256 rows per block.
void argmin_cuda_local(const DeviceMatrix* matrix, DeviceMatrix* output)
{
    dim3 block(1, 256);
    dim3 grid(1, (matrix->height-1) / block.y + 1);
    argminKernel<<<grid, block>>>(*matrix, *output);
}
/**
 * Row-wise argmax: each thread scans one full row of `matrix` and writes the
 * index of the largest element to output(row, 0).
 */
__global__ void argmaxKernel(DeviceMatrix matrix, DeviceMatrix output)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= matrix.height) {
        return;
    }
    int bestIdx = 0;
    float best = *getPtr(matrix, row, 0);
    for (int col = 1; col < matrix.width; col++) {
        const float val = *getPtr(matrix, row, col);
        if (val > best) {
            best = val;
            bestIdx = col;
        }
    }
    *getPtr(output, row, 0) = bestIdx;
}
// Launches argmaxKernel: one thread per row, 256 rows per block.
void argmax_cuda_local(const DeviceMatrix* matrix, DeviceMatrix* output)
{
    dim3 block(1, 256);
    dim3 grid(1, (matrix->height-1) / block.y + 1);
    argmaxKernel<<<grid, block>>>(*matrix, *output);
}
/**
 * Row-wise minimum: each thread scans one full row of `matrix` and writes the
 * smallest value to output(row, 0).
 */
__global__ void minKernel(DeviceMatrix matrix, DeviceMatrix output)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= matrix.height) {
        return;
    }
    float best = *getPtr(matrix, row, 0);
    for (int col = 1; col < matrix.width; col++) {
        const float val = *getPtr(matrix, row, col);
        if (val < best) {
            best = val;
        }
    }
    *getPtr(output, row, 0) = best;
}
// Launches minKernel: one thread per row, 256 rows per block.
void min_cuda_local(const DeviceMatrix* matrix, DeviceMatrix* output)
{
    dim3 block(1, 256);
    dim3 grid(1, (matrix->height-1) / block.y + 1);
    minKernel<<<grid, block>>>(*matrix, *output);
}
/**
 * Row-wise maximum: each thread scans one full row of `matrix` and writes the
 * largest value to output(row, 0).
 */
__global__ void maxKernel(DeviceMatrix matrix, DeviceMatrix output)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= matrix.height) {
        return;
    }
    float best = *getPtr(matrix, row, 0);
    for (int col = 1; col < matrix.width; col++) {
        const float val = *getPtr(matrix, row, col);
        if (val > best) {
            best = val;
        }
    }
    *getPtr(output, row, 0) = best;
}
// Launches maxKernel: one thread per row, 256 rows per block.
void max_cuda_local(const DeviceMatrix* matrix, DeviceMatrix* output)
{
    dim3 block(1, 256);
    dim3 grid(1, (matrix->height-1) / block.y + 1);
    maxKernel<<<grid, block>>>(*matrix, *output);
}
|
3d2f6c6038d8d9273b7628448044aedd60bae1f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/dot_product_layer.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
#include <algorithm>
#include <functional>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
#define BLOCK_DIM_SIZE 32
// Element-wise product of `num` equally-sized tensors: for every element tid,
// output[tid] = inputs[0][tid] * inputs[1][tid] * ... * inputs[num-1][tid].
template <typename T>
__global__ void dot_product_kernel(T** inputs, T* output, int size, int num) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= size) {
    return;
  }
  T product = 1;
  for (int j = 0; j < num; j++) {
    product *= inputs[j][idx];
  }
  output[idx] = product;
}
// Backward pass for the element-wise product of `num` tensors.
// For input i the gradient is top_grad * prod_{j != i} in_j, recovered here as
// top_grad * (fprop_output / in_i).  On entry dgrads[i] still holds the
// forward input values of tensor i; they are overwritten in place with the
// gradients.  If the forward output is 0 at this position, at least one input
// was 0 and the quotient is ill-defined, so all gradients are set to 0.
// NOTE(review): when exactly one input is 0, this also zeroes the (possibly
// non-zero) gradients of the other inputs — presumably an accepted
// approximation; confirm against the layer's unit tests.
template <typename T>
__global__ void dot_product_dgrad_kernel(const T* top_grad, T** dgrads, T* fprop_output, int size,
                                         int num) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < size) {
    for (int i = 0; i < num; ++i) {
      if (0 == fprop_output[tid]) {
        dgrads[i][tid] = 0;
      } else {
        // d_input is the forward value of input i at this element.
        T d_input = dgrads[i][tid];
        // The float cast forces the division to be carried out in float.
        dgrads[i][tid] = top_grad[tid] * ((float)fprop_output[tid] / d_input);
      }
    }
  }
}
} // end of namespace
/**
 * Constructs an element-wise product ("dot product") layer.
 *
 * Validates that at least two input tensors are given and that all inputs
 * share identical dimensions, then reserves device storage for the array of
 * input pointers (d_inputs_) and for a copy of the forward output
 * (fprop_output_), which the backward pass reads.
 *
 * @param in_tensors   input tensors; all must have the same shape
 * @param out_tensor   output tensor (same shape as each input)
 * @param blobs_buff   device buffer used to reserve this layer's storage
 * @param gpu_resource GPU resource (device id, stream) the layer runs on
 * @throws Error_t::WrongInput via CK_THROW_ if fewer than 2 inputs are given
 *         or the inputs' shapes disagree
 */
DotProductLayer::DotProductLayer(const Tensors2<float>& in_tensors,
                                 const Tensor2<float>& out_tensor,
                                 const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
                                 const std::shared_ptr<GPUResource>& gpu_resource)
    : Layer(gpu_resource) {
  try {
    // Elements per tensor and number of input tensors.
    size_ = in_tensors[0].get_num_elements();
    num_ = in_tensors.size();
    // error input checking
    auto dims = in_tensors[0].get_dimensions();
    if (num_ < 2) {
      CK_THROW_(Error_t::WrongInput, "DotProductLayer needs at least 2 input tensors");
    }
    // Every input must match the first input's rank and per-axis sizes.
    for (size_t i = 1; i < num_; i++) {
      if (in_tensors[i].get_dimensions().size() != dims.size()) {
        CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same num of dims");
      }
      for (unsigned int j = 0; j < dims.size(); j++) {
        if (in_tensors[i].get_dimensions()[j] != dims[j]) {
          CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same dims");
        }
      }
    }
    for (size_t i = 0; i < num_; i++) {
      in_tensors_.push_back(in_tensors[i]);
    }
    out_tensors_.push_back(out_tensor);
    // d_inputs_: device-side array of `num_` input pointers for the kernels.
    blobs_buff->reserve({num_}, &d_inputs_);
    // fprop_output_: snapshot of the forward result consumed by bprop().
    blobs_buff->reserve(out_tensor.get_dimensions(), &fprop_output_);
  } catch (const std::runtime_error& rt_err) {
    std::cerr << rt_err.what() << std::endl;
    throw;
  }
}
// Allocates the pinned host staging array that fprop()/bprop() use to gather
// the input-tensor pointers before copying them to the device.
void DotProductLayer::initialize() {
  auto pinned_host_buf = GeneralBuffer2<CudaHostAllocator>::create();
  pinned_host_buf->reserve({num_}, &h_inputs_);
  pinned_host_buf->allocate();
}
// Forward pass: output = element-wise product of all input tensors.
// On first call, lazily gathers the input pointers into pinned host memory
// and copies them to d_inputs_ on the layer's stream.  After launching the
// product kernel, the output is also copied into fprop_output_, which
// bprop() later divides by to recover per-input gradients.
// NOTE(review): is_train is unused here — the layer behaves identically in
// training and evaluation.
void DotProductLayer::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());
  if (!initialized_) {
    // One-time setup: stage input pointers on the host, then push to device.
    for (size_t i = 0; i < num_; i++) {
      h_inputs_.get_ptr()[i] = in_tensors_[i].get_ptr();
    }
    CK_CUDA_THROW_(hipMemcpyAsync((void*)d_inputs_.get_ptr(), (void*)h_inputs_.get_ptr(),
                                  num_ * sizeof(float*), hipMemcpyHostToDevice,
                                  get_gpu().get_stream()));
    initialized_ = true;
  }
  float* output = out_tensors_[0].get_ptr();
  // One thread per element.
  dim3 blockSize(256, 1, 1);
  dim3 gridSize((size_ + blockSize.x - 1) / blockSize.x, 1, 1);
  hipLaunchKernelGGL(( dot_product_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), d_inputs_.get_ptr(),
                                                                            output, size_, num_);
  // Preserve the forward result for the backward pass (same stream, so the
  // copy is ordered after the kernel).
  CK_CUDA_THROW_(hipMemcpyAsync((void*)fprop_output_.get_ptr(), (void*)output,
                                out_tensors_[0].get_size_in_bytes(), hipMemcpyDeviceToDevice,
                                get_gpu().get_stream()));
}
void DotProductLayer::bprop() {
CudaDeviceContext context(get_device_id());
if (!initialized_) {
for (size_t i = 0; i < num_; i++) {
h_inputs_.get_ptr()[i] = in_tensors_[i].get_ptr();
}
CK_CUDA_THROW_(hipMemcpyAsync((void*)d_inputs_.get_ptr(), (void*)h_inputs_.get_ptr(),
num_ * sizeof(float*), hipMemcpyHostToDevice,
get_gpu().get_stream()));
initialized_ = true;
}
float* output = out_tensors_[0].get_ptr();
dim3 blockSize(256, 1, 1);
dim3 gridSize((size_ + blockSize.x - 1) / blockSize.x, 1, 1);
hipLaunchKernelGGL(( dot_product_dgrad_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(),
output, d_inputs_.get_ptr(), fprop_output_.get_ptr(), size_, num_);
}
} // namespace HugeCTR
| 3d2f6c6038d8d9273b7628448044aedd60bae1f3.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/dot_product_layer.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
#include <algorithm>
#include <functional>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
#define BLOCK_DIM_SIZE 32
template <typename T>
__global__ void dot_product_kernel(T** inputs, T* output, int size, int num) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
T tmp = 1;
for (int i = 0; i < num; i++) {
tmp *= inputs[i][tid];
}
output[tid] = tmp;
}
}
template <typename T>
__global__ void dot_product_dgrad_kernel(const T* top_grad, T** dgrads, T* fprop_output, int size,
int num) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
for (int i = 0; i < num; ++i) {
if (0 == fprop_output[tid]) {
dgrads[i][tid] = 0;
} else {
T d_input = dgrads[i][tid];
dgrads[i][tid] = top_grad[tid] * ((float)fprop_output[tid] / d_input);
}
}
}
}
} // end of namespace
DotProductLayer::DotProductLayer(const Tensors2<float>& in_tensors,
const Tensor2<float>& out_tensor,
const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
const std::shared_ptr<GPUResource>& gpu_resource)
: Layer(gpu_resource) {
try {
size_ = in_tensors[0].get_num_elements();
num_ = in_tensors.size();
// error input checking
auto dims = in_tensors[0].get_dimensions();
if (num_ < 2) {
CK_THROW_(Error_t::WrongInput, "DotProductLayer needs at least 2 input tensors");
}
for (size_t i = 1; i < num_; i++) {
if (in_tensors[i].get_dimensions().size() != dims.size()) {
CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same num of dims");
}
for (unsigned int j = 0; j < dims.size(); j++) {
if (in_tensors[i].get_dimensions()[j] != dims[j]) {
CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same dims");
}
}
}
for (size_t i = 0; i < num_; i++) {
in_tensors_.push_back(in_tensors[i]);
}
out_tensors_.push_back(out_tensor);
blobs_buff->reserve({num_}, &d_inputs_);
blobs_buff->reserve(out_tensor.get_dimensions(), &fprop_output_);
} catch (const std::runtime_error& rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
}
void DotProductLayer::initialize() {
std::shared_ptr<GeneralBuffer2<CudaHostAllocator>> pinned_host_buf =
GeneralBuffer2<CudaHostAllocator>::create();
pinned_host_buf->reserve({num_}, &h_inputs_);
pinned_host_buf->allocate();
}
void DotProductLayer::fprop(bool is_train) {
CudaDeviceContext context(get_device_id());
if (!initialized_) {
for (size_t i = 0; i < num_; i++) {
h_inputs_.get_ptr()[i] = in_tensors_[i].get_ptr();
}
CK_CUDA_THROW_(cudaMemcpyAsync((void*)d_inputs_.get_ptr(), (void*)h_inputs_.get_ptr(),
num_ * sizeof(float*), cudaMemcpyHostToDevice,
get_gpu().get_stream()));
initialized_ = true;
}
float* output = out_tensors_[0].get_ptr();
dim3 blockSize(256, 1, 1);
dim3 gridSize((size_ + blockSize.x - 1) / blockSize.x, 1, 1);
dot_product_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>(d_inputs_.get_ptr(),
output, size_, num_);
CK_CUDA_THROW_(cudaMemcpyAsync((void*)fprop_output_.get_ptr(), (void*)output,
out_tensors_[0].get_size_in_bytes(), cudaMemcpyDeviceToDevice,
get_gpu().get_stream()));
}
void DotProductLayer::bprop() {
CudaDeviceContext context(get_device_id());
if (!initialized_) {
for (size_t i = 0; i < num_; i++) {
h_inputs_.get_ptr()[i] = in_tensors_[i].get_ptr();
}
CK_CUDA_THROW_(cudaMemcpyAsync((void*)d_inputs_.get_ptr(), (void*)h_inputs_.get_ptr(),
num_ * sizeof(float*), cudaMemcpyHostToDevice,
get_gpu().get_stream()));
initialized_ = true;
}
float* output = out_tensors_[0].get_ptr();
dim3 blockSize(256, 1, 1);
dim3 gridSize((size_ + blockSize.x - 1) / blockSize.x, 1, 1);
dot_product_dgrad_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>(
output, d_inputs_.get_ptr(), fprop_output_.get_ptr(), size_, num_);
}
} // namespace HugeCTR
|
e661191c22b069d6f773411e363e6b9edfbd0cc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wb.h"
//@@ define error checking macro here.
#define errCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
printErrorLog(ERROR, "Failed to run stmt ", #stmt); \
printErrorLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
//@@ INSERT CODE HERE
__global__ void colorToGrayscale(float *inputImage, float *outputImage, int *imageHeight, int *imageWidth)
{
int x = 3 * threadIdx.x;
int y = 3 * blockIdx.x;
float average = (inputImage[y * (*imageWidth) + x] * 0.21 + inputImage[y * (*imageWidth) + x + 1] * 0.71 + inputImage[y * (*imageWidth) + x + 2] * 0.07);
outputImage[y / 3 * (*imageWidth) + x / 3] = average;
}
int main(int argc, char *argv[]) {
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *deviceInputImageData;
float *deviceOutputImageData;
int *deviceImageHeight;
int *deviceImageWidth;
/* parse the input arguments */
//@@ Insert code here
if(argc != 3)
{
printf("Invalid arguments, format should be: \ninput.ppm expected.ppm");
return 0;
}
wbArg_t args = wbArg_read(argc, argv);
inputImageFile = wbArg_getInputFile(args, 0);
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
// For this lab the value is always 3
imageChannels = wbImage_getChannels(inputImage);
// Since the image is monochromatic, it only contains one channel
outputImage = wbImage_new(imageWidth, imageHeight, 1);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **)&deviceOutputImageData,
imageWidth * imageHeight * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
hipMalloc((void **)&deviceImageHeight, sizeof(int));
hipMalloc((void **)&deviceImageWidth, sizeof(int));
hipMemcpy(deviceImageHeight, &imageHeight, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(deviceImageWidth, &imageWidth, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colorToGrayscale), dim3(imageHeight), dim3(imageWidth), 0, 0, deviceInputImageData, deviceOutputImageData, deviceImageHeight, deviceImageWidth);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(float),
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(args, outputImage);
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
| e661191c22b069d6f773411e363e6b9edfbd0cc6.cu | #include "wb.h"
//@@ define error checking macro here.
#define errCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
printErrorLog(ERROR, "Failed to run stmt ", #stmt); \
printErrorLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
//@@ INSERT CODE HERE
__global__ void colorToGrayscale(float *inputImage, float *outputImage, int *imageHeight, int *imageWidth)
{
int x = 3 * threadIdx.x;
int y = 3 * blockIdx.x;
float average = (inputImage[y * (*imageWidth) + x] * 0.21 + inputImage[y * (*imageWidth) + x + 1] * 0.71 + inputImage[y * (*imageWidth) + x + 2] * 0.07);
outputImage[y / 3 * (*imageWidth) + x / 3] = average;
}
int main(int argc, char *argv[]) {
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *deviceInputImageData;
float *deviceOutputImageData;
int *deviceImageHeight;
int *deviceImageWidth;
/* parse the input arguments */
//@@ Insert code here
if(argc != 3)
{
printf("Invalid arguments, format should be: \ninput.ppm expected.ppm");
return 0;
}
wbArg_t args = wbArg_read(argc, argv);
inputImageFile = wbArg_getInputFile(args, 0);
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
// For this lab the value is always 3
imageChannels = wbImage_getChannels(inputImage);
// Since the image is monochromatic, it only contains one channel
outputImage = wbImage_new(imageWidth, imageHeight, 1);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **)&deviceOutputImageData,
imageWidth * imageHeight * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
cudaMalloc((void **)&deviceImageHeight, sizeof(int));
cudaMalloc((void **)&deviceImageWidth, sizeof(int));
cudaMemcpy(deviceImageHeight, &imageHeight, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(deviceImageWidth, &imageWidth, sizeof(int), cudaMemcpyHostToDevice);
colorToGrayscale<<<imageHeight, imageWidth>>>(deviceInputImageData, deviceOutputImageData, deviceImageHeight, deviceImageWidth);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(float),
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(args, outputImage);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
1b90cad27aadf49268a38fd36124b83d49d6f499.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "structs.cuh"
#include "semaphore.h"
#include "LBMconsts.cuh"
#include "phys.h"
#include "compact-steps.cuh"
#include "lbm-steps.cuh"
void debug_print();
__managed__ float* img_buf;
__managed__ cuDevTimers dev_times;
void calcConeFold(int it, std::vector<double>& timings){
cuTimer t0;
using namespace CompStep;
const size_t shmem_size = Nb.x*Nb.y*Nb.z*Cell::Qn*sizeof(ftype);
printf("Required Shmem size=%ld\n", shmem_size);
CHECK_ERROR(hipFuncSetAttribute(compactStepConeFold, hipFuncAttributeMaxDynamicSharedMemorySize, shmem_size));
static_assert(Nx%Nb.x==0);
static_assert(Ny%Nb.y==0);
static_assert(Nz%Nb.z==0);
static_assert(Nx>=Ny);
static_assert(Ny>=Nz);
img_buf = parsHost.arr4im.Arr3Dbuf;
#if DRAW_WAVEFRONT>1
const int period = Nx/Nb.x;
int ix=Nx-Nb.x-(parsHost.iStep-1)%period*Nb.x; {
#else
for(int ix=Nx-Nb.x; ix>=0; ix-=Nb.x) {
#endif
#ifdef ENABLE_DEVICE_TIMERS
dev_times.reset();
#endif
hipLaunchKernelGGL(( compactStepConeFold) , dim3(Ny/Nb.y*Nz/Nb.z), dim3(CompStep::Nblk),shmem_size , 0, ix);
//compactStepConeFold<<<Ny/Nb.y*Nz/Nb.z,dim3(Nb.x,Nb.y,Nb.z),shmem_size>>>(ix);
hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() );
#ifdef ENABLE_DEVICE_TIMERS
printf("Average device clocks, steps: ");
for(int i=0; i<dev_times.N; i++) printf("%g ", double(dev_times.clocks[i])/(Ny/Nb.y*Nz/Nb.z*parsHost.Nt) );
printf("\n");
#endif
}
debug_print(); timings.push_back( t0.getlaptime() );
}
| 1b90cad27aadf49268a38fd36124b83d49d6f499.cu | #include "structs.cuh"
#include "semaphore.h"
#include "LBMconsts.cuh"
#include "phys.h"
#include "compact-steps.cuh"
#include "lbm-steps.cuh"
void debug_print();
__managed__ float* img_buf;
__managed__ cuDevTimers dev_times;
void calcConeFold(int it, std::vector<double>& timings){
cuTimer t0;
using namespace CompStep;
const size_t shmem_size = Nb.x*Nb.y*Nb.z*Cell::Qn*sizeof(ftype);
printf("Required Shmem size=%ld\n", shmem_size);
CHECK_ERROR(cudaFuncSetAttribute(compactStepConeFold, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem_size));
static_assert(Nx%Nb.x==0);
static_assert(Ny%Nb.y==0);
static_assert(Nz%Nb.z==0);
static_assert(Nx>=Ny);
static_assert(Ny>=Nz);
img_buf = parsHost.arr4im.Arr3Dbuf;
#if DRAW_WAVEFRONT>1
const int period = Nx/Nb.x;
int ix=Nx-Nb.x-(parsHost.iStep-1)%period*Nb.x; {
#else
for(int ix=Nx-Nb.x; ix>=0; ix-=Nb.x) {
#endif
#ifdef ENABLE_DEVICE_TIMERS
dev_times.reset();
#endif
compactStepConeFold <<< Ny/Nb.y*Nz/Nb.z, CompStep::Nblk,shmem_size >>> (ix);
//compactStepConeFold<<<Ny/Nb.y*Nz/Nb.z,dim3(Nb.x,Nb.y,Nb.z),shmem_size>>>(ix);
cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() );
#ifdef ENABLE_DEVICE_TIMERS
printf("Average device clocks, steps: ");
for(int i=0; i<dev_times.N; i++) printf("%g ", double(dev_times.clocks[i])/(Ny/Nb.y*Nz/Nb.z*parsHost.Nt) );
printf("\n");
#endif
}
debug_print(); timings.push_back( t0.getlaptime() );
}
|
c9d03e6cd40ac3980e2e6804d4f598e919d127cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA
// with an array of offsets. Then the offsets are added in parallel
// to produce the string "World!"
// By Ingemar Ragnemalm 2010
// https://www.computer-graphics.se/hello-world-for-cuda.html
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
// Gives cuda access to memory that ad/bd are pointing to
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
// copies csize bytes from a to ad
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
// Cuda integer vector type, dim1=1, dim2=1, dim3=1
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
// grid, block (params)
// grid = block * threads/block
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
// Allows memory to be reused, not a full free just cuda free
hipFree( ad );
hipFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
}
| c9d03e6cd40ac3980e2e6804d4f598e919d127cb.cu | // This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA
// with an array of offsets. Then the offsets are added in parallel
// to produce the string "World!"
// By Ingemar Ragnemalm 2010
// https://www.computer-graphics.se/hello-world-for-cuda.html
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
// Gives cuda access to memory that ad/bd are pointing to
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
// copies csize bytes from a to ad
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
// Cuda integer vector type, dim1=1, dim2=1, dim3=1
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
// grid, block (params)
// grid = block * threads/block
hello<<<dimGrid, dimBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
// Allows memory to be reused, not a full free just cuda free
cudaFree( ad );
cudaFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
}
|
6da5df88930fbb1271540d9995799411090b3981.hip | // !!! This is a file automatically generated by hipify!!!
/* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids
* https://doi.org/10.1103/PhysRevFluids.4.103701
*
* Yifei Guan, Igor Novosselov
* University of Washington
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#include "seconds.h"
#include "LBM.h"
#include "LBM.cu"
#include "poisson.cu"
#include <hip/hip_runtime.h>
#include <hipfft.h>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
int main(int argc, char* argv[])
{
hipMemcpyFromSymbol(&dt_host, dt, sizeof(double), 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&Lx_host, Lx, sizeof(double), 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&Ly_host, Ly, sizeof(double), 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&dy_host, dy, sizeof(double), 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&voltage_host, voltage, sizeof(double), 0, hipMemcpyDeviceToHost);
hipMemcpyToSymbol(nu, &nu_host, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(uw, &uw_host, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(exf, &exf_host, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(K, &K_host, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(D, &D_host, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(G, &G_host, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(Beta, &Beta_host, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(TH, &TH_host, sizeof(double), 0, hipMemcpyHostToDevice);
// Compute parameters
compute_parameters(T, M, C, Fe, Ra, Pr);
printf("Simulating Electro-Thermo-convection in 2D\n");
printf(" domain size: %ux%u\n",NX,NY);
printf(" T: %g\n",*T);
printf(" M: %g\n",*M);
printf(" C: %g\n",*C);
printf(" Fe: %g\n",*Fe);
printf(" Ra: %g\n", *Ra);
printf(" Pr: %g\n", *Pr);
printf(" timesteps: %u\n",NSTEPS);
printf(" save every: %u\n",NSAVE);
printf(" message every: %u\n",NMSG);
printf("\n");
double bytesPerMiB = 1024.0*1024.0;
double bytesPerGiB = 1024.0*1024.0*1024.0;
checkCudaErrors(hipSetDevice(0));
int deviceId = 0;
checkCudaErrors(hipGetDevice(&deviceId));
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, deviceId));
size_t gpu_free_mem, gpu_total_mem;
checkCudaErrors(hipMemGetInfo(&gpu_free_mem,&gpu_total_mem));
printf("CUDA information\n");
printf(" using device: %d\n", deviceId);
printf(" name: %s\n",deviceProp.name);
printf(" multiprocessors: %d\n",deviceProp.multiProcessorCount);
printf(" compute capability: %d.%d\n",deviceProp.major,deviceProp.minor);
printf(" global memory: %.1f MiB\n",deviceProp.totalGlobalMem/bytesPerMiB);
printf(" free memory: %.1f MiB\n",gpu_free_mem/bytesPerMiB);
printf("\n");
// storage of f0 at upper and lower plate
checkCudaErrors(hipMalloc((void**)&f0bc, sizeof(double)*NX * 2));
//double *prop_gpu;
// microscopic variables
checkCudaErrors(hipMalloc((void**)&f0_gpu, mem_size_0dir));
checkCudaErrors(hipMalloc((void**)&f1_gpu, mem_size_n0dir));
checkCudaErrors(hipMalloc((void**)&f2_gpu, mem_size_n0dir));
checkCudaErrors(hipMalloc((void**)&h0_gpu, mem_size_0dir));
checkCudaErrors(hipMalloc((void**)&h1_gpu, mem_size_n0dir));
checkCudaErrors(hipMalloc((void**)&h2_gpu, mem_size_n0dir));
checkCudaErrors(hipMalloc((void**)&temp0_gpu, mem_size_0dir));
checkCudaErrors(hipMalloc((void**)&temp1_gpu, mem_size_n0dir));
checkCudaErrors(hipMalloc((void**)&temp2_gpu, mem_size_n0dir));
// macroscopic variables
checkCudaErrors(hipMalloc((void**)&rho_gpu, mem_size_scalar));
checkCudaErrors(hipMalloc((void**)&ux_gpu, mem_size_scalar));
checkCudaErrors(hipMalloc((void**)&uy_gpu, mem_size_scalar));
checkCudaErrors(hipMalloc((void**)&charge_gpu, mem_size_scalar));
checkCudaErrors(hipMalloc((void**)&phi_gpu, mem_size_scalar));
checkCudaErrors(hipMalloc((void**)&T_gpu, mem_size_scalar));
checkCudaErrors(hipMalloc((void**)&Ex_gpu, mem_size_scalar));
checkCudaErrors(hipMalloc((void**)&Ey_gpu, mem_size_scalar));
checkCudaErrors(hipMalloc((void**)&kx, sizeof(double)*NX));
checkCudaErrors(hipMalloc((void**)&ky, sizeof(double)*NE));
double *kx_host = (double*)malloc(sizeof(double)*NX);
double *ky_host = (double*)malloc(sizeof(double)*NE);
// Setup the cuFFT plan
CHECK_CUFFT(hipfftPlan2d(&plan, NE, NX, HIPFFT_Z2Z));
checkCudaErrors(hipMalloc((void**)&freq_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NE));
checkCudaErrors(hipMalloc((void**)&phi_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NE));
checkCudaErrors(hipMalloc((void**)&charge_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NE));
// Setup the frequencies kx and ky
for (unsigned i = 0; i <= NX / 2; i++)
{
kx_host[i] = (double)i * 2.0 * M_PI / Lx_host;
}
for (unsigned i = NX / 2 + 1; i < NX; i++)
{
kx_host[i] = ((double) i - NX) * 2.0 * M_PI / Lx_host;
}
for (unsigned i = 0; i <= NE / 2; i++)
{
ky_host[i] = (double)i * 2.0 * M_PI / (NE*dy_host);
}
for (unsigned i = NE / 2 + 1; i < NE; i++)
{
ky_host[i] = ((double)i - NE) * 2.0 * M_PI / (NE*dy_host);
}
CHECK(hipMemcpy(kx, kx_host,
sizeof(double) * NX, hipMemcpyHostToDevice));
CHECK(hipMemcpy(ky, ky_host,
sizeof(double) * NE, hipMemcpyHostToDevice));
//const size_t mem_size_props = 7*NX/nThreads*NY*sizeof(double);
//checkCudaErrors(hipMalloc((void**)&prop_gpu,mem_size_props));
double *scalar_host = (double*) malloc(mem_size_scalar);
if(scalar_host == NULL)
{
fprintf(stderr,"Error: unable to allocate required host memory (%.1f MiB).\n",mem_size_scalar/bytesPerMiB);
exit(-1);
}
size_t total_mem_bytes = 2*mem_size_0dir + 4 * mem_size_n0dir + 7 * mem_size_scalar;// +mem_size_props;
// create event objects
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Zero flow at t=0
// to initialize rho, charge, phi, ux, uy, Ex, Ey fields.
if (flag == 1) {
read_data(&t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu);
}
else {
initialization(rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu);
t = 0;
}
// initialise f1,h1 as equilibrium for rho, ux, uy, charge, ex, ey
init_equilibrium(f0_gpu,f1_gpu,h0_gpu,h1_gpu, temp0_gpu, temp1_gpu, rho_gpu,charge_gpu, ux_gpu,uy_gpu,Ex_gpu,Ey_gpu, T_gpu);
// open file for writing
FILE *fout = fopen("data.dat", "wb+");
save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu, 1);
// file for saving
FILE *fumax = fopen("umax.dat", "wb+");
// report computational results to screen
report_flow_properties(0, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu,uy_gpu, Ex_gpu, Ey_gpu);
double begin = seconds();
checkCudaErrors(hipEventRecord(start,0));
// main simulation loop; take NSTEPS time steps
for(unsigned int i = 0; i <= NSTEPS; ++i)
{
// stream and collide from f1 storing to f2
// optionally compute and save moments
stream_collide_save(f0_gpu,f1_gpu,f2_gpu, h0_gpu, h1_gpu, h2_gpu, temp0_gpu, temp1_gpu, temp2_gpu, rho_gpu,charge_gpu,
ux_gpu,uy_gpu, Ex_gpu, Ey_gpu, T_gpu, t, f0bc);
// =========================================================================
// Fast poisson solver
// =========================================================================
// Extend the domain
extension(charge_gpu, charge_gpu_ext);
// Execute a real-to-complex 2D FFT
CHECK_CUFFT(hipfftExecZ2Z(plan, charge_gpu_ext, freq_gpu_ext, HIPFFT_FORWARD));
// Execute the derivatives in frequency domain
derivative(kx, ky, freq_gpu_ext);
// Execute a complex-to-complex 2D IFFT
CHECK_CUFFT(hipfftExecZ2Z(plan, freq_gpu_ext, phi_gpu_ext, HIPFFT_BACKWARD));
// Extraction of phi from extended domain phi_gpu_ext
extract(phi_gpu, phi_gpu_ext);
// Calculate electric field strength
efield(phi_gpu, Ex_gpu, Ey_gpu);
if(i%NSAVE == 1)
{
save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu,T_gpu, 1);
}
if (i%NDMD == 1)
{
printf("Iteration: %u, physical time: %g.\n", i, t);
// save for MATLAB postprocessing
char filename[128];
//sprintf(filename, "%g.dat", t);
sprintf(filename, "charge_data");
FILE *fout2 = fopen(filename, "ab");
save_data_dmd(fout2, t, ux_gpu, uy_gpu, charge_gpu, phi_gpu);
fclose(fout2);
}
if (i%printCurrent == 1) {
checkCudaErrors(hipMemcpy(charge_host, charge_gpu, mem_size_scalar, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Ey_host, Ey_gpu, mem_size_scalar, hipMemcpyDeviceToHost));
double current_host = current(charge_host, Ey_host);
printf("Iteration: %u, physical time: %g, Current = %g\n", i, t, current_host);
//printf("%g\n", Ez_host[scalar_index(0, 0, 0)]);
// =============================================================================================================
// save umax
// =============================================================================================================
record_umax(fumax, t, ux_gpu, uy_gpu);
}
t = t + dt_host;
}
checkCudaErrors(hipEventRecord(stop,0));
checkCudaErrors(hipEventSynchronize(stop));
float milliseconds = 0.0f;
checkCudaErrors(hipEventElapsedTime(&milliseconds,start,stop));
double end = seconds();
double runtime = end-begin;
double gpu_runtime = 0.001*milliseconds;
size_t doubles_read = ndir; // per node every time step
size_t doubles_written = ndir;
size_t doubles_saved = 3; // per node every NSAVE time steps
// note NX*NY overflows when NX=NY=65536
size_t nodes_updated = NSTEPS*size_t(NX*NY);
size_t nodes_saved = (NSTEPS/NSAVE)*size_t(NX*NY);
double speed = nodes_updated/(1e6*runtime);
double bandwidth = (nodes_updated*(doubles_read + doubles_written)+nodes_saved*(doubles_saved))*sizeof(double)/(runtime*bytesPerGiB);
printf(" ----- performance information -----\n");
printf(" memory allocated (GPU): %.1f (MiB)\n",total_mem_bytes/bytesPerMiB);
printf(" memory allocated (host): %.1f (MiB)\n",mem_size_scalar/bytesPerMiB);
printf(" timesteps: %u\n",NSTEPS);
printf(" clock runtime: %.3f (s)\n",runtime);
printf(" gpu runtime: %.3f (s)\n",gpu_runtime);
printf(" speed: %.2f (Mlups)\n",speed);
printf(" bandwidth: %.1f (GiB/s)\n",bandwidth);
save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu, 0);
fclose(fout);
// =============================================================================================================
// save umax
// =============================================================================================================
record_umax(fumax, t, ux_gpu, uy_gpu);
fclose(fumax);
FILE *fend = fopen("data_end.dat", "wb+");
save_data_end(fend, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu);
fclose(fend);
// destory event objects
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
// free all memory allocatd on the GPU and host
checkCudaErrors(hipFree(f0_gpu));
checkCudaErrors(hipFree(f1_gpu));
checkCudaErrors(hipFree(f2_gpu));
checkCudaErrors(hipFree(h0_gpu));
checkCudaErrors(hipFree(h1_gpu));
checkCudaErrors(hipFree(h2_gpu));
checkCudaErrors(hipFree(rho_gpu));
checkCudaErrors(hipFree(phi_gpu));
checkCudaErrors(hipFree(Ex_gpu));
checkCudaErrors(hipFree(Ey_gpu));
checkCudaErrors(hipFree(ux_gpu));
checkCudaErrors(hipFree(uy_gpu));
checkCudaErrors(hipFree(f0bc));
checkCudaErrors(hipFree(charge_gpu_ext));
checkCudaErrors(hipFree(phi_gpu_ext));
checkCudaErrors(hipFree(freq_gpu_ext));
checkCudaErrors(hipFree(kx));
checkCudaErrors(hipFree(ky));
CHECK_CUFFT(hipfftDestroy(plan));
//checkCudaErrors(hipFree(prop_gpu));
free(scalar_host);
free(kx_host);
free(ky_host);
// release resources associated with the GPU device
hipDeviceReset();
system("pause");
return 0;
}
| 6da5df88930fbb1271540d9995799411090b3981.cu | /* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids
* https://doi.org/10.1103/PhysRevFluids.4.103701
*
* Yifei Guan, Igor Novosselov
* University of Washington
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#include "seconds.h"
#include "LBM.h"
#include "LBM.cu"
#include "poisson.cu"
#include <cuda_runtime.h>
#include <cufft.h>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
// Entry point for the 2D electro-thermo-convection lattice-Boltzmann solver.
// Flow: mirror physical constants between host and device __constant__ memory,
// allocate distribution-function and macroscopic field arrays on the GPU, set
// up a cuFFT-based fast Poisson solver for the electric potential, run the
// main time loop (stream/collide + Poisson solve + E-field update each step),
// then write output files and release all resources.
// Relies on globals and kernels declared in LBM.h / LBM.cu / poisson.cu
// (dt_host, f0_gpu, stream_collide_save, extension, derivative, extract, ...).
int main(int argc, char* argv[])
{
	// Copy grid/time constants out of device constant memory into host
	// mirrors, and push host-side physical parameters the other way.
	cudaMemcpyFromSymbol(&dt_host, dt, sizeof(double), 0, cudaMemcpyDeviceToHost);
	cudaMemcpyFromSymbol(&Lx_host, Lx, sizeof(double), 0, cudaMemcpyDeviceToHost);
	cudaMemcpyFromSymbol(&Ly_host, Ly, sizeof(double), 0, cudaMemcpyDeviceToHost);
	cudaMemcpyFromSymbol(&dy_host, dy, sizeof(double), 0, cudaMemcpyDeviceToHost);
	cudaMemcpyFromSymbol(&voltage_host, voltage, sizeof(double), 0, cudaMemcpyDeviceToHost);
	cudaMemcpyToSymbol(nu, &nu_host, sizeof(double), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(uw, &uw_host, sizeof(double), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(exf, &exf_host, sizeof(double), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(K, &K_host, sizeof(double), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(D, &D_host, sizeof(double), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(G, &G_host, sizeof(double), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(Beta, &Beta_host, sizeof(double), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(TH, &TH_host, sizeof(double), 0, cudaMemcpyHostToDevice);
	// Compute parameters (dimensionless groups T, M, C, Fe, Ra, Pr)
	compute_parameters(T, M, C, Fe, Ra, Pr);
	printf("Simulating Electro-Thermo-convection in 2D\n");
	printf(" domain size: %ux%u\n",NX,NY);
	printf(" T: %g\n",*T);
	printf(" M: %g\n",*M);
	printf(" C: %g\n",*C);
	printf(" Fe: %g\n",*Fe);
	printf(" Ra: %g\n", *Ra);
	printf(" Pr: %g\n", *Pr);
	printf(" timesteps: %u\n",NSTEPS);
	printf(" save every: %u\n",NSAVE);
	printf(" message every: %u\n",NMSG);
	printf("\n");
	double bytesPerMiB = 1024.0*1024.0;
	double bytesPerGiB = 1024.0*1024.0*1024.0;
	// Select GPU 0 and report device capabilities / free memory.
	checkCudaErrors(cudaSetDevice(0));
	int deviceId = 0;
	checkCudaErrors(cudaGetDevice(&deviceId));
	cudaDeviceProp deviceProp;
	checkCudaErrors(cudaGetDeviceProperties(&deviceProp, deviceId));
	size_t gpu_free_mem, gpu_total_mem;
	checkCudaErrors(cudaMemGetInfo(&gpu_free_mem,&gpu_total_mem));
	printf("CUDA information\n");
	printf(" using device: %d\n", deviceId);
	printf(" name: %s\n",deviceProp.name);
	printf(" multiprocessors: %d\n",deviceProp.multiProcessorCount);
	printf(" compute capability: %d.%d\n",deviceProp.major,deviceProp.minor);
	printf(" global memory: %.1f MiB\n",deviceProp.totalGlobalMem/bytesPerMiB);
	printf(" free memory: %.1f MiB\n",gpu_free_mem/bytesPerMiB);
	printf("\n");
	// storage of f0 at upper and lower plate
	checkCudaErrors(cudaMalloc((void**)&f0bc, sizeof(double)*NX * 2));
	//double *prop_gpu;
	// microscopic variables (distribution functions: f = flow, h = charge,
	// temp = temperature; 0 = rest direction, 1/2 = ping-pong buffers)
	checkCudaErrors(cudaMalloc((void**)&f0_gpu, mem_size_0dir));
	checkCudaErrors(cudaMalloc((void**)&f1_gpu, mem_size_n0dir));
	checkCudaErrors(cudaMalloc((void**)&f2_gpu, mem_size_n0dir));
	checkCudaErrors(cudaMalloc((void**)&h0_gpu, mem_size_0dir));
	checkCudaErrors(cudaMalloc((void**)&h1_gpu, mem_size_n0dir));
	checkCudaErrors(cudaMalloc((void**)&h2_gpu, mem_size_n0dir));
	checkCudaErrors(cudaMalloc((void**)&temp0_gpu, mem_size_0dir));
	checkCudaErrors(cudaMalloc((void**)&temp1_gpu, mem_size_n0dir));
	checkCudaErrors(cudaMalloc((void**)&temp2_gpu, mem_size_n0dir));
	// macroscopic variables (one scalar field per grid node)
	checkCudaErrors(cudaMalloc((void**)&rho_gpu, mem_size_scalar));
	checkCudaErrors(cudaMalloc((void**)&ux_gpu, mem_size_scalar));
	checkCudaErrors(cudaMalloc((void**)&uy_gpu, mem_size_scalar));
	checkCudaErrors(cudaMalloc((void**)&charge_gpu, mem_size_scalar));
	checkCudaErrors(cudaMalloc((void**)&phi_gpu, mem_size_scalar));
	checkCudaErrors(cudaMalloc((void**)&T_gpu, mem_size_scalar));
	checkCudaErrors(cudaMalloc((void**)&Ex_gpu, mem_size_scalar));
	checkCudaErrors(cudaMalloc((void**)&Ey_gpu, mem_size_scalar));
	// Wave numbers for the spectral Poisson solve (device + host copies).
	checkCudaErrors(cudaMalloc((void**)&kx, sizeof(double)*NX));
	checkCudaErrors(cudaMalloc((void**)&ky, sizeof(double)*NE));
	double *kx_host = (double*)malloc(sizeof(double)*NX);
	double *ky_host = (double*)malloc(sizeof(double)*NE);
	// Setup the cuFFT plan (double-complex 2D transform on the NE x NX
	// extended domain) and the extended-domain work buffers.
	CHECK_CUFFT(cufftPlan2d(&plan, NE, NX, CUFFT_Z2Z));
	checkCudaErrors(cudaMalloc((void**)&freq_gpu_ext, sizeof(cufftDoubleComplex)*NX*NE));
	checkCudaErrors(cudaMalloc((void**)&phi_gpu_ext, sizeof(cufftDoubleComplex)*NX*NE));
	checkCudaErrors(cudaMalloc((void**)&charge_gpu_ext, sizeof(cufftDoubleComplex)*NX*NE));
	// Setup the frequencies kx and ky in standard FFT ordering:
	// non-negative frequencies first, then the negative half.
	for (unsigned i = 0; i <= NX / 2; i++)
	{
		kx_host[i] = (double)i * 2.0 * M_PI / Lx_host;
	}
	for (unsigned i = NX / 2 + 1; i < NX; i++)
	{
		kx_host[i] = ((double) i - NX) * 2.0 * M_PI / Lx_host;
	}
	for (unsigned i = 0; i <= NE / 2; i++)
	{
		ky_host[i] = (double)i * 2.0 * M_PI / (NE*dy_host);
	}
	for (unsigned i = NE / 2 + 1; i < NE; i++)
	{
		ky_host[i] = ((double)i - NE) * 2.0 * M_PI / (NE*dy_host);
	}
	CHECK(cudaMemcpy(kx, kx_host,
		sizeof(double) * NX, cudaMemcpyHostToDevice));
	CHECK(cudaMemcpy(ky, ky_host,
		sizeof(double) * NE, cudaMemcpyHostToDevice));
	//const size_t mem_size_props = 7*NX/nThreads*NY*sizeof(double);
	//checkCudaErrors(cudaMalloc((void**)&prop_gpu,mem_size_props));
	// Host staging buffer for one scalar field.
	double *scalar_host = (double*) malloc(mem_size_scalar);
	if(scalar_host == NULL)
	{
		fprintf(stderr,"Error: unable to allocate required host memory (%.1f MiB).\n",mem_size_scalar/bytesPerMiB);
		exit(-1);
	}
	size_t total_mem_bytes = 2*mem_size_0dir + 4 * mem_size_n0dir + 7 * mem_size_scalar;// +mem_size_props;
	// create event objects for GPU-side timing
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	// Zero flow at t=0
	// to initialize rho, charge, phi, ux, uy, Ex, Ey fields.
	// flag == 1 restarts from a previously saved state.
	if (flag == 1) {
		read_data(&t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu);
	}
	else {
		initialization(rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu);
		t = 0;
	}
	// initialise f1,h1 as equilibrium for rho, ux, uy, charge, ex, ey
	init_equilibrium(f0_gpu,f1_gpu,h0_gpu,h1_gpu, temp0_gpu, temp1_gpu, rho_gpu,charge_gpu, ux_gpu,uy_gpu,Ex_gpu,Ey_gpu, T_gpu);
	// open file for writing
	FILE *fout = fopen("data.dat", "wb+");
	save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu, 1);
	// file for saving
	FILE *fumax = fopen("umax.dat", "wb+");
	// report computational results to screen
	report_flow_properties(0, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu,uy_gpu, Ex_gpu, Ey_gpu);
	double begin = seconds();
	checkCudaErrors(cudaEventRecord(start,0));
	// main simulation loop; take NSTEPS time steps
	for(unsigned int i = 0; i <= NSTEPS; ++i)
	{
		// stream and collide from f1 storing to f2
		// optionally compute and save moments
		stream_collide_save(f0_gpu,f1_gpu,f2_gpu, h0_gpu, h1_gpu, h2_gpu, temp0_gpu, temp1_gpu, temp2_gpu, rho_gpu,charge_gpu,
			ux_gpu,uy_gpu, Ex_gpu, Ey_gpu, T_gpu, t, f0bc);
		// =========================================================================
		// Fast poisson solver: solve for phi from the charge density spectrally.
		// =========================================================================
		// Extend the domain (mirror/extend charge field onto the NE-row grid)
		extension(charge_gpu, charge_gpu_ext);
		// Execute a real-to-complex 2D FFT
		CHECK_CUFFT(cufftExecZ2Z(plan, charge_gpu_ext, freq_gpu_ext, CUFFT_FORWARD));
		// Execute the derivatives in frequency domain
		derivative(kx, ky, freq_gpu_ext);
		// Execute a complex-to-complex 2D IFFT
		CHECK_CUFFT(cufftExecZ2Z(plan, freq_gpu_ext, phi_gpu_ext, CUFFT_INVERSE));
		// Extraction of phi from extended domain phi_gpu_ext
		extract(phi_gpu, phi_gpu_ext);
		// Calculate electric field strength (E = -grad(phi))
		efield(phi_gpu, Ex_gpu, Ey_gpu);
		if(i%NSAVE == 1)
		{
			save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu,T_gpu, 1);
		}
		if (i%NDMD == 1)
		{
			printf("Iteration: %u, physical time: %g.\n", i, t);
			// save for MATLAB postprocessing (appended snapshots for DMD)
			char filename[128];
			//sprintf(filename, "%g.dat", t);
			sprintf(filename, "charge_data");
			FILE *fout2 = fopen(filename, "ab");
			save_data_dmd(fout2, t, ux_gpu, uy_gpu, charge_gpu, phi_gpu);
			fclose(fout2);
		}
		if (i%printCurrent == 1) {
			// NOTE(review): charge_host / Ey_host are not allocated in this
			// function — presumably allocated in LBM.h/LBM.cu; verify.
			checkCudaErrors(cudaMemcpy(charge_host, charge_gpu, mem_size_scalar, cudaMemcpyDeviceToHost));
			checkCudaErrors(cudaMemcpy(Ey_host, Ey_gpu, mem_size_scalar, cudaMemcpyDeviceToHost));
			double current_host = current(charge_host, Ey_host);
			printf("Iteration: %u, physical time: %g, Current = %g\n", i, t, current_host);
			//printf("%g\n", Ez_host[scalar_index(0, 0, 0)]);
			// =============================================================================================================
			// save umax
			// =============================================================================================================
			record_umax(fumax, t, ux_gpu, uy_gpu);
		}
		t = t + dt_host;
	}
	checkCudaErrors(cudaEventRecord(stop,0));
	checkCudaErrors(cudaEventSynchronize(stop));
	float milliseconds = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&milliseconds,start,stop));
	double end = seconds();
	double runtime = end-begin;
	double gpu_runtime = 0.001*milliseconds;
	// Rough bandwidth model: each node reads and writes ndir doubles per
	// step, plus 3 doubles saved every NSAVE steps.
	size_t doubles_read = ndir; // per node every time step
	size_t doubles_written = ndir;
	size_t doubles_saved = 3; // per node every NSAVE time steps
	// note NX*NY overflows when NX=NY=65536
	size_t nodes_updated = NSTEPS*size_t(NX*NY);
	size_t nodes_saved = (NSTEPS/NSAVE)*size_t(NX*NY);
	double speed = nodes_updated/(1e6*runtime);
	double bandwidth = (nodes_updated*(doubles_read + doubles_written)+nodes_saved*(doubles_saved))*sizeof(double)/(runtime*bytesPerGiB);
	printf(" ----- performance information -----\n");
	printf(" memory allocated (GPU): %.1f (MiB)\n",total_mem_bytes/bytesPerMiB);
	printf(" memory allocated (host): %.1f (MiB)\n",mem_size_scalar/bytesPerMiB);
	printf(" timesteps: %u\n",NSTEPS);
	printf(" clock runtime: %.3f (s)\n",runtime);
	printf(" gpu runtime: %.3f (s)\n",gpu_runtime);
	printf(" speed: %.2f (Mlups)\n",speed);
	printf(" bandwidth: %.1f (GiB/s)\n",bandwidth);
	save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu, 0);
	fclose(fout);
	// =============================================================================================================
	// save umax
	// =============================================================================================================
	record_umax(fumax, t, ux_gpu, uy_gpu);
	fclose(fumax);
	FILE *fend = fopen("data_end.dat", "wb+");
	save_data_end(fend, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, Ex_gpu, Ey_gpu, T_gpu);
	fclose(fend);
	// destroy event objects
	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
	// free all memory allocated on the GPU and host
	// NOTE(review): charge_gpu, T_gpu and temp0/1/2_gpu are allocated above
	// but never freed here (leak until cudaDeviceReset below).
	checkCudaErrors(cudaFree(f0_gpu));
	checkCudaErrors(cudaFree(f1_gpu));
	checkCudaErrors(cudaFree(f2_gpu));
	checkCudaErrors(cudaFree(h0_gpu));
	checkCudaErrors(cudaFree(h1_gpu));
	checkCudaErrors(cudaFree(h2_gpu));
	checkCudaErrors(cudaFree(rho_gpu));
	checkCudaErrors(cudaFree(phi_gpu));
	checkCudaErrors(cudaFree(Ex_gpu));
	checkCudaErrors(cudaFree(Ey_gpu));
	checkCudaErrors(cudaFree(ux_gpu));
	checkCudaErrors(cudaFree(uy_gpu));
	checkCudaErrors(cudaFree(f0bc));
	checkCudaErrors(cudaFree(charge_gpu_ext));
	checkCudaErrors(cudaFree(phi_gpu_ext));
	checkCudaErrors(cudaFree(freq_gpu_ext));
	checkCudaErrors(cudaFree(kx));
	checkCudaErrors(cudaFree(ky));
	CHECK_CUFFT(cufftDestroy(plan));
	//checkCudaErrors(cudaFree(prop_gpu));
	free(scalar_host);
	free(kx_host);
	free(ky_host);
	// release resources associated with the GPU device
	cudaDeviceReset();
	system("pause");
	return 0;
}
|
129a9facf95bf98785963765675ace5d07c6a579.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VectorAdd.h"
// Element-wise vector addition: C[idx] = A[idx] + B[idx].
// Launched with 2D thread blocks; `column` must equal blockDim.x so that the
// (threadIdx.x, threadIdx.y) pair linearizes to a unique in-block offset
// (the host wrapper launches 32x32 blocks and passes column = 32).
__global__ void _vectorAdd(float *A, float *B, float *C, unsigned int size, unsigned int column)
{
	// Each block covers blockDim.x * blockDim.y consecutive elements, so the
	// per-block base offset must advance by that amount.  (The original
	// `blockIdx.x * blockDim.x` advanced by only blockDim.x = 32 per block,
	// making block ranges overlap and leaving the tail of the vector
	// unprocessed whenever size > 1024.)
	unsigned int blk = blockIdx.x * blockDim.x * blockDim.y;
	unsigned int dx = threadIdx.x;          // offset within a row of the block
	unsigned int dy = threadIdx.y * column; // row offset within the block
	unsigned idx = dx + dy + blk;           // global element index
	//__shared__ float values[1024];
	if (idx < size)                         // guard the ragged tail
		C[idx] = A[idx] + B[idx];
}
// Host-side launcher: element-wise addition of device vectors A and B into C.
// Uses 32x32 = 1024-thread blocks and a ceil-divided 1D grid; the trailing
// literal 32 is the `column` stride and matches blockDim.x.
// A, B, C must be device pointers with at least `size` floats each.
void vectorAdd(float *A, float *B, float *C, unsigned int size)
{
	dim3 threads(32, 32);
	dim3 blocks((size + 1023) / 1024); // ceil(size / 1024) blocks
	hipLaunchKernelGGL(( _vectorAdd), dim3(blocks), dim3(threads), 0, 0, A, B, C, size, 32);
}
| 129a9facf95bf98785963765675ace5d07c6a579.cu | #include "VectorAdd.h"
// Element-wise vector addition: C[idx] = A[idx] + B[idx].
// Launched with 2D thread blocks; `column` must equal blockDim.x so that the
// (threadIdx.x, threadIdx.y) pair linearizes to a unique in-block offset
// (the host wrapper launches 32x32 blocks and passes column = 32).
__global__ void _vectorAdd(float *A, float *B, float *C, unsigned int size, unsigned int column)
{
	// Each block covers blockDim.x * blockDim.y consecutive elements, so the
	// per-block base offset must advance by that amount.  (The original
	// `blockIdx.x * blockDim.x` advanced by only blockDim.x = 32 per block,
	// making block ranges overlap and leaving the tail of the vector
	// unprocessed whenever size > 1024.)
	unsigned int blk = blockIdx.x * blockDim.x * blockDim.y;
	unsigned int dx = threadIdx.x;          // offset within a row of the block
	unsigned int dy = threadIdx.y * column; // row offset within the block
	unsigned idx = dx + dy + blk;           // global element index
	//__shared__ float values[1024];
	if (idx < size)                         // guard the ragged tail
		C[idx] = A[idx] + B[idx];
}
// Host-side launcher: element-wise addition of device vectors A and B into C.
// Each block is 32x32 (1024 threads); the grid is the ceiling of size / 1024.
void vectorAdd(float *A, float *B, float *C, unsigned int size)
{
	const unsigned int elemsPerBlock = 1024u;
	dim3 blockDims(32, 32);
	dim3 gridDims((size + elemsPerBlock - 1) / elemsPerBlock);
	_vectorAdd<<<gridDims, blockDims>>>(A, B, C, size, 32);
}
|
70ab2d2c0e28f42f5b902dd966aa52115b49164f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright © 2014 Martin Ueding <dev@martin-ueding.de>
#include "random_walk.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Abort the program with a diagnostic message if a HIP API call failed.
// `line` is the caller's __LINE__, included in the message for context.
void handle_error(hipError_t err, int line) {
    if (err == hipSuccess) {
        return;  // nothing to report
    }
    printf("%s at line %d\n", hipGetErrorString(err), line);
    exit(EXIT_FAILURE);
}
// Print a crude ASCII histogram of walker end-point `distances` to stdout,
// using `bin_count` equal-width bins scaled to the maximum distance; the
// longest bar is normalized to 50 '#' characters.
void plot_histogram(int bin_count, int walker_count, float *distances) {
    int *bins = (int *) malloc(sizeof(int) * bin_count);
    for (int bin_idx = 0; bin_idx != bin_count; bin_idx++) {
        bins[bin_idx] = 0;
    }
    // The largest distance defines the histogram range.
    float max = 0.0;
    for (int walker = 0; walker != walker_count; walker++) {
        if (distances[walker] > max) {
            max = distances[walker];
        }
    }
    if (max <= 0.0f) {
        // All distances are zero (or the input is empty): dividing by max
        // below would be 0/0 -> NaN, and converting NaN to int is undefined
        // behavior.  Use a range of 1 so everything lands in bin 0.
        max = 1.0f;
    }
    for (int walker = 0; walker != walker_count; walker++) {
        int bin_idx = distances[walker] * bin_count / max;
        if (bin_idx < 0) {
            bin_idx = 0;              // defensive clamp for negative inputs
        }
        if (bin_idx >= bin_count) {
            bin_idx = bin_count - 1;  // the maximum itself lands here
        }
        bins[bin_idx]++;
    }
    int slot_max = 0;
    for (int bin_idx = 0; bin_idx != bin_count; bin_idx++) {
        if (bins[bin_idx] > slot_max) {
            slot_max = bins[bin_idx];
        }
    }
    if (slot_max == 0) {
        slot_max = 1;  // avoid integer division by zero when walker_count == 0
    }
    for (int bin_idx = 0; bin_idx != bin_count; bin_idx++) {
        printf("%10.1f ", bin_idx * max / bin_count);
        for (int slot_idx = 0; slot_idx != bins[bin_idx] * 50 / slot_max; slot_idx++) {
            printf("#");
        }
        printf("\n");
    }
    free(bins);
}
// Arithmetic mean of `count` floats, accumulated in double precision.
double compute_average(float *data, int count) {
    double total = 0.0;
    for (int i = 0; i != count; ++i) {
        total += data[i];
    }
    return total / count;
}
// Root-mean-square of `count` floats, accumulated in double precision.
double compute_rms(float *data, int count) {
    double sq_sum = 0.0;
    for (int i = 0; i != count; ++i) {
        sq_sum += data[i] * data[i];
    }
    return sqrt(sq_sum / count);
}
// Driver for a GPU random-walk Monte-Carlo simulation (HIP):
// initializes `walker_count` walkers on the device, advances them in chunks
// of `steps_per_iter` steps, and logs average / RMS distance per chunk to
// CSV files; the final iteration also prints an ASCII histogram.
// Kernels init_kernel / random_walk_kernel come from random_walk.h.
int main(int argc, char **argv) {
    int walker_count = 1000;
    int bin_count = 30;
    hipError_t err;
    // Get memory to store distance in.
    float *distances_host;
    size_t distances_size = sizeof(*distances_host) * walker_count;
    distances_host = (float *) malloc(distances_size);
    assert(distances_host);
    float *distances_dev;
    err = hipMalloc(&distances_dev, distances_size);
    handle_error(err, __LINE__);
    // Walker positions: 2 ints (x, y) per walker.
    int *walkers_dev;
    err = hipMalloc(&walkers_dev, sizeof(*walkers_dev) * 2 * walker_count);
    handle_error(err, __LINE__);
    // Per-walker RNG state.
    hiprandState_t *curand_states_dev;
    err = hipMalloc(&curand_states_dev, sizeof(*curand_states_dev) * walker_count);
    handle_error(err, __LINE__);
    int block_size = 256;
    // Ceil-divided grid so every walker gets a thread.
    hipLaunchKernelGGL(( init_kernel), dim3((walker_count-1)/block_size + 1), dim3(block_size) , 0, 0,
        walker_count, walkers_dev, distances_dev, curand_states_dev);
    int steps_per_iter = 100;
    FILE *averages_stream = fopen("averages.csv", "w");
    FILE *rms_stream = fopen("rms.csv", "w");
    int iter_count = 100;
    for (int iter = 0; iter != iter_count; iter++) {
        clock_t start = clock();
        hipLaunchKernelGGL(( random_walk_kernel), dim3((walker_count-1)/block_size + 1), dim3(block_size) , 0, 0,
            walker_count,
            steps_per_iter,
            walkers_dev,
            distances_dev,
            curand_states_dev
        );
        // Copy the results back to the host.
        // (The blocking copy also waits for the kernel, so the clock() span
        // covers kernel execution — presumably intentional; verify.)
        err = hipMemcpy(distances_host, distances_dev, distances_size, hipMemcpyDeviceToHost);
        handle_error(err, __LINE__);
        clock_t end = clock();
        double average = compute_average(distances_host, walker_count);
        double rms = compute_rms(distances_host, walker_count);
        if (iter == iter_count - 1) {
            printf("The part on the GPU for %d walkers for %d steps took %g seconds.\n", walker_count, iter * steps_per_iter, (end-start) / (float) CLOCKS_PER_SEC);
            plot_histogram(bin_count, walker_count, distances_host);
            printf("\n");
        }
        fprintf(averages_stream, "%d %f\n", iter * steps_per_iter, average);
        fprintf(rms_stream, "%d %f\n", iter * steps_per_iter, rms);
    }
    fclose(averages_stream);
    fclose(rms_stream);
    free(distances_host);
    hipFree(distances_dev);
    hipFree(curand_states_dev);
    hipFree(walkers_dev);
    return 0;
}
| 70ab2d2c0e28f42f5b902dd966aa52115b49164f.cu | // Copyright © 2014 Martin Ueding <dev@martin-ueding.de>
#include "random_walk.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Abort the program with a diagnostic message if a CUDA API call failed.
// `line` is the caller's __LINE__, included in the message for context.
void handle_error(cudaError_t err, int line) {
    if (err == cudaSuccess) {
        return;  // nothing to report
    }
    printf("%s at line %d\n", cudaGetErrorString(err), line);
    exit(EXIT_FAILURE);
}
// Print a crude ASCII histogram of walker end-point `distances` to stdout,
// using `bin_count` equal-width bins scaled to the maximum distance; the
// longest bar is normalized to 50 '#' characters.
void plot_histogram(int bin_count, int walker_count, float *distances) {
    int *bins = (int *) malloc(sizeof(int) * bin_count);
    for (int bin_idx = 0; bin_idx != bin_count; bin_idx++) {
        bins[bin_idx] = 0;
    }
    // The largest distance defines the histogram range.
    float max = 0.0;
    for (int walker = 0; walker != walker_count; walker++) {
        if (distances[walker] > max) {
            max = distances[walker];
        }
    }
    if (max <= 0.0f) {
        // All distances are zero (or the input is empty): dividing by max
        // below would be 0/0 -> NaN, and converting NaN to int is undefined
        // behavior.  Use a range of 1 so everything lands in bin 0.
        max = 1.0f;
    }
    for (int walker = 0; walker != walker_count; walker++) {
        int bin_idx = distances[walker] * bin_count / max;
        if (bin_idx < 0) {
            bin_idx = 0;              // defensive clamp for negative inputs
        }
        if (bin_idx >= bin_count) {
            bin_idx = bin_count - 1;  // the maximum itself lands here
        }
        bins[bin_idx]++;
    }
    int slot_max = 0;
    for (int bin_idx = 0; bin_idx != bin_count; bin_idx++) {
        if (bins[bin_idx] > slot_max) {
            slot_max = bins[bin_idx];
        }
    }
    if (slot_max == 0) {
        slot_max = 1;  // avoid integer division by zero when walker_count == 0
    }
    for (int bin_idx = 0; bin_idx != bin_count; bin_idx++) {
        printf("%10.1f ", bin_idx * max / bin_count);
        for (int slot_idx = 0; slot_idx != bins[bin_idx] * 50 / slot_max; slot_idx++) {
            printf("#");
        }
        printf("\n");
    }
    free(bins);
}
// Arithmetic mean of `count` floats, accumulated in double precision.
double compute_average(float *data, int count) {
    double acc = 0.0;
    for (int pos = 0; pos != count; ++pos) {
        acc += data[pos];
    }
    return acc / count;
}
// Root-mean-square of `count` floats, accumulated in double precision.
double compute_rms(float *data, int count) {
    double acc = 0.0;
    for (int pos = 0; pos != count; ++pos) {
        acc += data[pos] * data[pos];
    }
    return sqrt(acc / count);
}
// Driver for a GPU random-walk Monte-Carlo simulation (CUDA):
// initializes `walker_count` walkers on the device, advances them in chunks
// of `steps_per_iter` steps, and logs average / RMS distance per chunk to
// CSV files; the final iteration also prints an ASCII histogram.
// Kernels init_kernel / random_walk_kernel come from random_walk.h.
int main(int argc, char **argv) {
    int walker_count = 1000;
    int bin_count = 30;
    cudaError_t err;
    // Get memory to store distance in.
    float *distances_host;
    size_t distances_size = sizeof(*distances_host) * walker_count;
    distances_host = (float *) malloc(distances_size);
    assert(distances_host);
    float *distances_dev;
    err = cudaMalloc(&distances_dev, distances_size);
    handle_error(err, __LINE__);
    // Walker positions: 2 ints (x, y) per walker.
    int *walkers_dev;
    err = cudaMalloc(&walkers_dev, sizeof(*walkers_dev) * 2 * walker_count);
    handle_error(err, __LINE__);
    // Per-walker RNG state.
    curandState_t *curand_states_dev;
    err = cudaMalloc(&curand_states_dev, sizeof(*curand_states_dev) * walker_count);
    handle_error(err, __LINE__);
    int block_size = 256;
    // Ceil-divided grid so every walker gets a thread.
    init_kernel<<< (walker_count-1)/block_size + 1, block_size >>>(
        walker_count, walkers_dev, distances_dev, curand_states_dev);
    int steps_per_iter = 100;
    FILE *averages_stream = fopen("averages.csv", "w");
    FILE *rms_stream = fopen("rms.csv", "w");
    int iter_count = 100;
    for (int iter = 0; iter != iter_count; iter++) {
        clock_t start = clock();
        random_walk_kernel<<< (walker_count-1)/block_size + 1, block_size >>>(
            walker_count,
            steps_per_iter,
            walkers_dev,
            distances_dev,
            curand_states_dev
        );
        // Copy the results back to the host.
        // (The blocking copy also waits for the kernel, so the clock() span
        // covers kernel execution — presumably intentional; verify.)
        err = cudaMemcpy(distances_host, distances_dev, distances_size, cudaMemcpyDeviceToHost);
        handle_error(err, __LINE__);
        clock_t end = clock();
        double average = compute_average(distances_host, walker_count);
        double rms = compute_rms(distances_host, walker_count);
        if (iter == iter_count - 1) {
            printf("The part on the GPU for %d walkers for %d steps took %g seconds.\n", walker_count, iter * steps_per_iter, (end-start) / (float) CLOCKS_PER_SEC);
            plot_histogram(bin_count, walker_count, distances_host);
            printf("\n");
        }
        fprintf(averages_stream, "%d %f\n", iter * steps_per_iter, average);
        fprintf(rms_stream, "%d %f\n", iter * steps_per_iter, rms);
    }
    fclose(averages_stream);
    fclose(rms_stream);
    free(distances_host);
    cudaFree(distances_dev);
    cudaFree(curand_states_dev);
    cudaFree(walkers_dev);
    return 0;
}
|
42aadfc9c067976d9fe5ad83fb79a3a10539f2d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
// #define BENCH_PRINT
void run(int argc, char** argv);
int rows, cols;
int* data;
int** wall;
int* result;
#define M_SEED 9
int pyramid_height;
//#define BENCH_PRINT
// Parse the command line (cols, rows, pyramid_height into file-scope
// globals), allocate the flat cost matrix plus per-row pointers and the
// result row, and fill the matrix with pseudo-random costs in [0, 9]
// from a fixed seed so runs are reproducible.
void
init(int argc, char** argv)
{
	if (argc != 4) {
		printf("Usage: dynproc row_len col_len pyramid_height\n");
		exit(0);
	}
	cols = atoi(argv[1]);
	rows = atoi(argv[2]);
	pyramid_height = atoi(argv[3]);

	// Single flat allocation; wall[] holds per-row pointers into it.
	data = new int[rows*cols];
	wall = new int*[rows];
	for (int r = 0; r < rows; r++)
		wall[r] = data + cols*r;
	result = new int[cols];

	srand(M_SEED);  // fixed seed -> deterministic cost matrix
	for (int r = 0; r < rows; r++)
	{
		for (int c = 0; c < cols; c++)
		{
			wall[r][c] = rand() % 10;
		}
	}
#ifdef BENCH_PRINT
	for (int r = 0; r < rows; r++)
	{
		for (int c = 0; c < cols; c++)
		{
			printf("%d ", wall[r][c]);
		}
		printf("\n");
	}
#endif
}
// Report an error message on stderr.
// NOTE: despite the name, this does not terminate the program.
void
fatal(char *s)
{
	fprintf(stderr, "error: %s\n", s);
	fflush(stderr);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
// Advance `iteration` rows of the dynamic-programming shortest-path sweep.
// Each block processes one "pyramid": it loads a BLOCK_SIZE-wide strip of the
// current accumulated-cost row (gpuSrc) into shared memory and iterates in
// place, shrinking the valid interior by one cell per step so that no
// inter-block communication is needed.  Blocks overlap by `border`
// (= iteration*HALO) columns on each side to cover the shrinkage.
//   gpuWall    : cost matrix (rows startStep .. startStep+iteration-1 used)
//   gpuSrc     : input row of accumulated costs
//   gpuResults : output row after `iteration` DP steps
// NOTE(review): `computed` is only assigned inside the loop — callers must
// pass iteration >= 1 (calc_path guarantees this via MIN(pyramid_height, ...)).
__global__ void dynproc_kernel(
                int iteration,
                int *gpuWall,
                int *gpuSrc,
                int *gpuResults,
                int cols,
                int rows,
                int startStep,
                int border)
{
        // Double shared-memory rows: prev = input of a DP step, result = output.
        __shared__ int prev[BLOCK_SIZE];
        __shared__ int result[BLOCK_SIZE];
	int bx = blockIdx.x;
	int tx=threadIdx.x;
	// each block finally computes result for a small block
	// after N iterations.
	// it is the non-overlapping small blocks that cover
	// all the input data
	// calculate the small block size
	int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
	// calculate the boundary for the block according to
	// the boundary of its small block
	int blkX = small_block_cols*bx-border;
	int blkXmax = blkX+BLOCK_SIZE-1;
	// calculate the global thread coordination
	int xidx = blkX+tx;
	// effective range within this block that falls within
	// the valid range of the input data
	// used to rule out computation outside the boundary.
	int validXmin = (blkX < 0) ? -blkX : 0;
	int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
	// West/East neighbor indices, clamped at the valid edges so the border
	// cell reuses its own column (matches the CPU reference behavior).
	int W = tx-1;
	int E = tx+1;
	W = (W < validXmin) ? validXmin : W;
	E = (E > validXmax) ? validXmax : E;
	bool isValid = IN_RANGE(tx, validXmin, validXmax);
	if(IN_RANGE(xidx, 0, cols-1)){
		prev[tx] = gpuSrc[xidx];
	}
	__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
	bool computed;
	for (int i=0; i<iteration ; i++){
		computed = false;
		// Interior shrinks by one cell per step; only interior threads compute.
		if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
				isValid){
			computed = true;
			int left = prev[W];
			int up = prev[tx];
			int right = prev[E];
			int shortest = MIN(left, up);
			shortest = MIN(shortest, right);
			int index = cols*(startStep+i)+xidx;
			result[tx] = shortest + gpuWall[index];
		}
		__syncthreads();
		if(i==iteration-1)
			break;
		if(computed)	 //Assign the computation range
			prev[tx]= result[tx];
		__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
	}
	// update the global memory
	// after the last iteration, only threads coordinated within the
	// small block perform the calculation and switch on ``computed''
	if (computed){
		gpuResults[xidx]=result[tx];
	}
}
/*
compute N time steps
*/
// Run the full DP sweep over all rows, `pyramid_height` rows per kernel
// launch, ping-ponging between gpuResult[0] and gpuResult[1].
// Returns the index (0 or 1) of the buffer holding the final row.
// With PREF defined, managed memory is prefetched to device 0 first and the
// kernels run on a dedicated stream.
// NOTE(review): in the PREF branch the four streams are never destroyed and
// stream4 is not synchronized before returning — the caller relies on later
// event synchronization; verify this is intended.
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \
	 int pyramid_height, int blockCols, int borderCols)
{
#ifdef PREF
	hipStream_t stream1;
	hipStream_t stream2;
	hipStream_t stream3;
	hipStream_t stream4;
	hipStreamCreate(&stream1);
	hipStreamCreate(&stream2);
	hipStreamCreate(&stream3);
	hipStreamCreate(&stream4);
	// Prefetch the wall matrix and both ping-pong rows to device 0.
	hipMemPrefetchAsync(gpuWall,(rows*cols - cols)*sizeof(int), 0, stream1 );
	hipMemPrefetchAsync(gpuResult[0],sizeof(int)*cols, 0, stream2 );
	hipMemPrefetchAsync(gpuResult[1],sizeof(int)*cols, 0, stream3 );
	hipStreamSynchronize(stream1);
	hipStreamSynchronize(stream2);
	hipStreamSynchronize(stream3);
	dim3 dimBlock(BLOCK_SIZE);
	dim3 dimGrid(blockCols);
	int src = 1, dst = 0;
	for (int t = 0; t < rows-1; t+=pyramid_height) {
		// Swap roles: last launch's output becomes this launch's input.
		int temp = src;
		src = dst;
		dst = temp;
		hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0 , stream4,
			MIN(pyramid_height, rows-t-1),
			gpuWall, gpuResult[src], gpuResult[dst],
			cols,rows, t, borderCols);
	}
	return dst;
#else
	dim3 dimBlock(BLOCK_SIZE);
	dim3 dimGrid(blockCols);
	int src = 1, dst = 0;
	for (int t = 0; t < rows-1; t+=pyramid_height) {
		// Swap roles: last launch's output becomes this launch's input.
		int temp = src;
		src = dst;
		dst = temp;
		hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
			MIN(pyramid_height, rows-t-1),
			gpuWall, gpuResult[src], gpuResult[dst],
			cols,rows, t, borderCols);
	}
	return dst;
#endif
}
// Program entry point: select the configured GPU when more than one is
// present, then run the pathfinder benchmark.
int main(int argc, char** argv)
{
	int device_count = 0;
	hipGetDeviceCount(&device_count);
	if (device_count > 1) {
		hipSetDevice(DEVICE);
	}
	run(argc, argv);
	return EXIT_SUCCESS;
}
// Benchmark body: build the input (init), derive the pyramid launch geometry,
// copy row 0 and the remaining wall rows into managed memory, time calc_path
// with hip events, optionally print the result row, and free everything.
void run(int argc, char** argv)
{
    init(argc, argv);
    /* --------------- pyramid parameters --------------- */
    // Each block's usable interior shrinks by 2*HALO per DP step, so
    // smallBlockCol columns of output per block and ceil(cols/smallBlockCol)
    // blocks overall.
    int borderCols = (pyramid_height)*HALO;
    int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
    int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);
    printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\
	pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);
    int *gpuWall, *gpuResult[2];
    int size = rows*cols;
    printf("col is %d, size is %d\n", cols, size);
    // Managed allocations replace the original explicit malloc+memcpy pairs
    // (kept below as comments); plain memcpy then initializes them from host.
    // hipMalloc((void**)&gpuResult[0], sizeof(int)*cols);
    // hipMalloc((void**)&gpuResult[1], sizeof(int)*cols);
    hipMallocManaged(&gpuResult[0], sizeof(int)*cols);
    hipMallocManaged(&gpuResult[1], sizeof(int)*cols);
    // hipMemcpy(gpuResult[0], data, sizeof(int)*cols, hipMemcpyHostToDevice);
    memcpy(gpuResult[0], data, sizeof(int)*cols);
    // hipMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
    hipMallocManaged(&gpuWall, sizeof(int)*(size-cols));
    // hipMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), hipMemcpyHostToDevice);
    memcpy(gpuWall, data+cols, sizeof(int)*(size-cols));
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float elapsed_time;
    hipEventRecord(start, 0);
    int final_ret;
    for (int i = 0; i < 1; i ++)
	final_ret = calc_path(gpuWall, gpuResult, rows, cols, \
			pyramid_height, blockCols, borderCols);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_time, start, stop);
    printf("\nTime taken is %lf seconds.\n", (elapsed_time)/1000);
    // hipMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, hipMemcpyDeviceToHost);
#ifdef BENCH_PRINT
    for (int i = 0; i < cols; i++)
	printf("%d ",data[i]) ;
    printf("\n") ;
    for (int i = 0; i < cols; i++)
	printf("%d ", gpuResult[final_ret][i]);
    // printf("%d ",result[i]) ;
    printf("\n") ;
#endif
    // NOTE(review): the hip events are never destroyed (minor leak).
    hipFree(gpuWall);
    hipFree(gpuResult[0]);
    hipFree(gpuResult[1]);
    delete [] data;
    delete [] wall;
    delete [] result;
}
| 42aadfc9c067976d9fe5ad83fb79a3a10539f2d4.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
// #define BENCH_PRINT
void run(int argc, char** argv);
int rows, cols;
int* data;
int** wall;
int* result;
#define M_SEED 9
int pyramid_height;
//#define BENCH_PRINT
// Parse the command line (cols, rows, pyramid_height into file-scope
// globals), allocate the flat cost matrix plus per-row pointers and the
// result row, and fill the matrix with pseudo-random costs in [0, 9]
// from a fixed seed so runs are reproducible.
void
init(int argc, char** argv)
{
	if (argc != 4) {
		printf("Usage: dynproc row_len col_len pyramid_height\n");
		exit(0);
	}
	cols = atoi(argv[1]);
	rows = atoi(argv[2]);
	pyramid_height = atoi(argv[3]);

	// Single flat allocation; wall[] holds per-row pointers into it.
	data = new int[rows*cols];
	wall = new int*[rows];
	for (int r = 0; r < rows; r++)
		wall[r] = data + cols*r;
	result = new int[cols];

	srand(M_SEED);  // fixed seed -> deterministic cost matrix
	for (int r = 0; r < rows; r++)
	{
		for (int c = 0; c < cols; c++)
		{
			wall[r][c] = rand() % 10;
		}
	}
#ifdef BENCH_PRINT
	for (int r = 0; r < rows; r++)
	{
		for (int c = 0; c < cols; c++)
		{
			printf("%d ", wall[r][c]);
		}
		printf("\n");
	}
#endif
}
// Report an error message on stderr.
// NOTE: despite the name, this does not terminate the program.
void
fatal(char *s)
{
	fprintf(stderr, "error: %s\n", s);
	fflush(stderr);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
// Advance `iteration` rows of the dynamic-programming shortest-path sweep.
// Each block processes one "pyramid": it loads a BLOCK_SIZE-wide strip of the
// current accumulated-cost row (gpuSrc) into shared memory and iterates in
// place, shrinking the valid interior by one cell per step so that no
// inter-block communication is needed.  Blocks overlap by `border`
// (= iteration*HALO) columns on each side to cover the shrinkage.
//   gpuWall    : cost matrix (rows startStep .. startStep+iteration-1 used)
//   gpuSrc     : input row of accumulated costs
//   gpuResults : output row after `iteration` DP steps
// NOTE(review): `computed` is only assigned inside the loop — callers must
// pass iteration >= 1 (calc_path guarantees this via MIN(pyramid_height, ...)).
__global__ void dynproc_kernel(
                int iteration,
                int *gpuWall,
                int *gpuSrc,
                int *gpuResults,
                int cols,
                int rows,
                int startStep,
                int border)
{
        // Double shared-memory rows: prev = input of a DP step, result = output.
        __shared__ int prev[BLOCK_SIZE];
        __shared__ int result[BLOCK_SIZE];
	int bx = blockIdx.x;
	int tx=threadIdx.x;
	// each block finally computes result for a small block
	// after N iterations.
	// it is the non-overlapping small blocks that cover
	// all the input data
	// calculate the small block size
	int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
	// calculate the boundary for the block according to
	// the boundary of its small block
	int blkX = small_block_cols*bx-border;
	int blkXmax = blkX+BLOCK_SIZE-1;
	// calculate the global thread coordination
	int xidx = blkX+tx;
	// effective range within this block that falls within
	// the valid range of the input data
	// used to rule out computation outside the boundary.
	int validXmin = (blkX < 0) ? -blkX : 0;
	int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
	// West/East neighbor indices, clamped at the valid edges so the border
	// cell reuses its own column (matches the CPU reference behavior).
	int W = tx-1;
	int E = tx+1;
	W = (W < validXmin) ? validXmin : W;
	E = (E > validXmax) ? validXmax : E;
	bool isValid = IN_RANGE(tx, validXmin, validXmax);
	if(IN_RANGE(xidx, 0, cols-1)){
		prev[tx] = gpuSrc[xidx];
	}
	__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
	bool computed;
	for (int i=0; i<iteration ; i++){
		computed = false;
		// Interior shrinks by one cell per step; only interior threads compute.
		if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
				isValid){
			computed = true;
			int left = prev[W];
			int up = prev[tx];
			int right = prev[E];
			int shortest = MIN(left, up);
			shortest = MIN(shortest, right);
			int index = cols*(startStep+i)+xidx;
			result[tx] = shortest + gpuWall[index];
		}
		__syncthreads();
		if(i==iteration-1)
			break;
		if(computed)	 //Assign the computation range
			prev[tx]= result[tx];
		__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
	}
	// update the global memory
	// after the last iteration, only threads coordinated within the
	// small block perform the calculation and switch on ``computed''
	if (computed){
		gpuResults[xidx]=result[tx];
	}
}
/*
compute N time steps
*/
// Run the full DP sweep over all rows, `pyramid_height` rows per kernel
// launch, ping-ponging between gpuResult[0] and gpuResult[1].
// Returns the index (0 or 1) of the buffer holding the final row.
// With PREF defined, managed memory is prefetched to device 0 first and the
// kernels run on a dedicated stream.
// NOTE(review): in the PREF branch the four streams are never destroyed and
// stream4 is not synchronized before returning — the caller relies on later
// event synchronization; verify this is intended.
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \
	 int pyramid_height, int blockCols, int borderCols)
{
#ifdef PREF
	cudaStream_t stream1;
	cudaStream_t stream2;
	cudaStream_t stream3;
	cudaStream_t stream4;
	cudaStreamCreate(&stream1);
	cudaStreamCreate(&stream2);
	cudaStreamCreate(&stream3);
	cudaStreamCreate(&stream4);
	// Prefetch the wall matrix and both ping-pong rows to device 0.
	cudaMemPrefetchAsync(gpuWall,(rows*cols - cols)*sizeof(int), 0, stream1 );
	cudaMemPrefetchAsync(gpuResult[0],sizeof(int)*cols, 0, stream2 );
	cudaMemPrefetchAsync(gpuResult[1],sizeof(int)*cols, 0, stream3 );
	cudaStreamSynchronize(stream1);
	cudaStreamSynchronize(stream2);
	cudaStreamSynchronize(stream3);
	dim3 dimBlock(BLOCK_SIZE);
	dim3 dimGrid(blockCols);
	int src = 1, dst = 0;
	for (int t = 0; t < rows-1; t+=pyramid_height) {
		// Swap roles: last launch's output becomes this launch's input.
		int temp = src;
		src = dst;
		dst = temp;
		dynproc_kernel<<<dimGrid, dimBlock, 0 , stream4>>>(
			MIN(pyramid_height, rows-t-1),
			gpuWall, gpuResult[src], gpuResult[dst],
			cols,rows, t, borderCols);
	}
	return dst;
#else
	dim3 dimBlock(BLOCK_SIZE);
	dim3 dimGrid(blockCols);
	int src = 1, dst = 0;
	for (int t = 0; t < rows-1; t+=pyramid_height) {
		// Swap roles: last launch's output becomes this launch's input.
		int temp = src;
		src = dst;
		dst = temp;
		dynproc_kernel<<<dimGrid, dimBlock>>>(
			MIN(pyramid_height, rows-t-1),
			gpuWall, gpuResult[src], gpuResult[dst],
			cols,rows, t, borderCols);
	}
	return dst;
#endif
}
// Program entry point: select the configured GPU when more than one is
// present, then run the pathfinder benchmark.
int main(int argc, char** argv)
{
	int device_count = 0;
	cudaGetDeviceCount(&device_count);
	if (device_count > 1) {
		cudaSetDevice(DEVICE);
	}
	run(argc, argv);
	return EXIT_SUCCESS;
}
// Benchmark body: build the input (init), derive the pyramid launch geometry,
// copy row 0 and the remaining wall rows into managed memory, time calc_path
// with CUDA events, optionally print the result row, and free everything.
void run(int argc, char** argv)
{
    init(argc, argv);
    /* --------------- pyramid parameters --------------- */
    // Each block's usable interior shrinks by 2*HALO per DP step, so
    // smallBlockCol columns of output per block and ceil(cols/smallBlockCol)
    // blocks overall.
    int borderCols = (pyramid_height)*HALO;
    int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
    int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);
    printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\
	pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);
    int *gpuWall, *gpuResult[2];
    int size = rows*cols;
    printf("col is %d, size is %d\n", cols, size);
    // Managed allocations replace the original explicit malloc+memcpy pairs
    // (kept below as comments); plain memcpy then initializes them from host.
    // cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols);
    // cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols);
    cudaMallocManaged(&gpuResult[0], sizeof(int)*cols);
    cudaMallocManaged(&gpuResult[1], sizeof(int)*cols);
    // cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice);
    memcpy(gpuResult[0], data, sizeof(int)*cols);
    // cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
    cudaMallocManaged(&gpuWall, sizeof(int)*(size-cols));
    // cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice);
    memcpy(gpuWall, data+cols, sizeof(int)*(size-cols));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float elapsed_time;
    cudaEventRecord(start, 0);
    int final_ret;
    for (int i = 0; i < 1; i ++)
	final_ret = calc_path(gpuWall, gpuResult, rows, cols, \
			pyramid_height, blockCols, borderCols);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);
    printf("\nTime taken is %lf seconds.\n", (elapsed_time)/1000);
    // cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost);
#ifdef BENCH_PRINT
    for (int i = 0; i < cols; i++)
	printf("%d ",data[i]) ;
    printf("\n") ;
    for (int i = 0; i < cols; i++)
	printf("%d ", gpuResult[final_ret][i]);
    // printf("%d ",result[i]) ;
    printf("\n") ;
#endif
    // NOTE(review): the CUDA events are never destroyed (minor leak).
    cudaFree(gpuWall);
    cudaFree(gpuResult[0]);
    cudaFree(gpuResult[1]);
    delete [] data;
    delete [] wall;
    delete [] result;
}
|
195ad067328d4e2266f2f207e731e900c03d769e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transpose.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Auto-generated benchmark driver: for each of argv[1] matrix sizes from
// matrices_ and each of the 20 block shapes in blocks_, times 1000 launches
// of the `transpose` kernel and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
double *A = NULL;
// FIX: allocate sizeof(double) per element.  The original passed
// XSIZE*YSIZE *bytes*, i.e. only 1/8 of the storage needed for N doubles,
// so kernel accesses past the first N/8 elements were out of bounds.
hipMalloc(&A, sizeof(double)*XSIZE*YSIZE);
// Round the problem size up so gridDim*blockDim covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces lazy context creation before anything is timed.
hipFree(0);hipLaunchKernelGGL((
transpose), dim3(gridBlock),dim3(threadBlock), 0, 0, N,A);
hipDeviceSynchronize();
// Warm-up launches, excluded from the timed region.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
transpose), dim3(gridBlock),dim3(threadBlock), 0, 0, N,A);
}
// NOTE(review): no device sync before `end`, so this mostly measures launch
// overhead plus whatever completes during enqueue — presumably intentional
// for this generated harness; confirm if wall-clock kernel time is wanted.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
transpose), dim3(gridBlock),dim3(threadBlock), 0, 0, N,A);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// FIX: release the device buffer for this configuration (was leaked).
hipFree(A);
}
}} | 195ad067328d4e2266f2f207e731e900c03d769e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transpose.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Auto-generated benchmark driver: for each of argv[1] matrix sizes from
// matrices_ and each of the 20 block shapes in blocks_, times 1000 launches
// of the `transpose` kernel and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
double *A = NULL;
// FIX: allocate sizeof(double) per element.  The original passed
// XSIZE*YSIZE *bytes*, i.e. only 1/8 of the storage needed for N doubles,
// so kernel accesses past the first N/8 elements were out of bounds.
cudaMalloc(&A, sizeof(double)*XSIZE*YSIZE);
// Round the problem size up so gridDim*blockDim covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) forces lazy context creation before anything is timed.
cudaFree(0);
transpose<<<gridBlock,threadBlock>>>(N,A);
cudaDeviceSynchronize();
// Warm-up launches, excluded from the timed region.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transpose<<<gridBlock,threadBlock>>>(N,A);
}
// NOTE(review): no device sync before `end`, so this mostly measures launch
// overhead plus whatever completes during enqueue — presumably intentional
// for this generated harness; confirm if wall-clock kernel time is wanted.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transpose<<<gridBlock,threadBlock>>>(N,A);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// FIX: release the device buffer for this configuration (was leaked).
cudaFree(A);
}
}}
15cd53a6f0f8497d579a05ef51f083bf4177504f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/IndexIVFFlat.h>
#include <faiss/index_io.h>
#include <faiss/utils/random.h>
#include <faiss/gpu/GpuIndexIVFFlat.h>
#include <faiss/gpu/perf/IndexWrapper.h>
#include <faiss/gpu/test/TestUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/Timer.h>
#include <gflags/gflags.h>
#include <map>
#include <memory>
#include <vector>
#include <hip/hip_runtime_api.h>
DEFINE_int32(nprobe, 5, "number of coarse centroids to probe");
DEFINE_int32(k, 3, "final number of closest results returned");
DEFINE_int32(num_queries, 3, "number of query vectors");
DEFINE_string(in, "/home/jhj/local/index.out", "index file for input");
DEFINE_bool(diff, true, "show exact distance + index output discrepancies");
DEFINE_bool(use_float16_coarse, false, "coarse quantizer in float16");
DEFINE_int64(seed, -1, "specify random seed");
DEFINE_int32(num_gpus, 1, "number of gpus to use");
DEFINE_int32(index, 2, "0 = no indices on GPU; 1 = 32 bit, 2 = 64 bit on GPU");
using namespace faiss::gpu;
// Perf/correctness harness for GpuIndexIVFFlat: loads an IVFFlat index from
// disk, clones it onto FLAGS_num_gpus GPU(s), runs the same random k-NN query
// batch through the CPU index and the GPU index, prints both wall-clock
// times and compares the two result lists.
int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
// Keep profiling off until just before the GPU search of interest.
hipProfilerStop();
auto seed = FLAGS_seed != -1L ? FLAGS_seed : time(nullptr);
printf("using seed %ld\n", seed);
auto numQueries = FLAGS_num_queries;
// Load the CPU index; dynamic_cast yields nullptr (caught by the assert
// below) if the file does not contain an IndexIVFFlat.
auto index = std::unique_ptr<faiss::IndexIVFFlat>(
dynamic_cast<faiss::IndexIVFFlat*>(faiss::read_index(FLAGS_in.c_str())));
FAISS_ASSERT((bool) index);
index->nprobe = FLAGS_nprobe;
auto dim = index->d;
printf("Database: dim %d num vecs %ld\n", dim, index->ntotal);
printf("Coarse centroids: %ld\n", index->quantizer->ntotal);
printf("L2 lookup: %d queries, nprobe %d, total k %d\n",
numQueries, FLAGS_nprobe, FLAGS_k);
printf("float16 coarse quantizer %s\n",
FLAGS_use_float16_coarse ? "enabled" : "disabled");
// Convert to GPU index
printf("Copying index to %d GPU(s)...\n", FLAGS_num_gpus);
// Factory used by IndexWrapper to build one GPU clone of the index per device.
auto initFn = [&index](faiss::gpu::GpuResourcesProvider* res, int dev) ->
std::unique_ptr<faiss::gpu::GpuIndexIVFFlat> {
GpuIndexIVFFlatConfig config;
config.device = dev;
config.indicesOptions = (faiss::gpu::IndicesOptions) FLAGS_index;
config.flatConfig.useFloat16 = FLAGS_use_float16_coarse;
auto p = std::unique_ptr<faiss::gpu::GpuIndexIVFFlat>(
new faiss::gpu::GpuIndexIVFFlat(res,
index->d,
index->nlist,
index->metric_type,
config));
p->copyFrom(index.get());
return p;
};
IndexWrapper<faiss::gpu::GpuIndexIVFFlat> gpuIndex(FLAGS_num_gpus, initFn);
gpuIndex.setNumProbes(FLAGS_nprobe);
printf("copy done\n");
// Build query vectors
HostTensor<float, 2, true> cpuQuery({numQueries, dim});
faiss::float_rand(cpuQuery.data(), cpuQuery.numElements(), seed);
// Time faiss CPU
HostTensor<float, 2, true> cpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> cpuIndices({numQueries, FLAGS_k});
float cpuTime = 0.0f;
{
CpuTimer timer;
index->search(numQueries,
cpuQuery.data(),
FLAGS_k,
cpuDistances.data(),
cpuIndices.data());
cpuTime = timer.elapsedMilliseconds();
}
printf("CPU time %.3f ms\n", cpuTime);
HostTensor<float, 2, true> gpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> gpuIndices({numQueries, FLAGS_k});
// Profile only the GPU search; drain all devices first so the timed region
// does not include leftover clone/copy work.
CUDA_VERIFY(hipProfilerStart());
faiss::gpu::synchronizeAllDevices();
float gpuTime = 0.0f;
// Time GPU
{
CpuTimer timer;
gpuIndex.getIndex()->search(cpuQuery.getSize(0),
cpuQuery.data(),
FLAGS_k,
gpuDistances.data(),
gpuIndices.data());
// There is a device -> host copy above, so no need to time
// additional synchronization with the GPU
gpuTime = timer.elapsedMilliseconds();
}
CUDA_VERIFY(hipProfilerStop());
printf("GPU time %.3f ms\n", gpuTime);
// Side-by-side comparison of CPU and GPU result lists; prints per-entry
// discrepancies when FLAGS_diff is set.
compareLists(cpuDistances.data(), cpuIndices.data(),
gpuDistances.data(), gpuIndices.data(),
numQueries, FLAGS_k,
"", true, FLAGS_diff, false);
CUDA_VERIFY(hipDeviceSynchronize());
return 0;
}
| 15cd53a6f0f8497d579a05ef51f083bf4177504f.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/IndexIVFFlat.h>
#include <faiss/index_io.h>
#include <faiss/utils/random.h>
#include <faiss/gpu/GpuIndexIVFFlat.h>
#include <faiss/gpu/perf/IndexWrapper.h>
#include <faiss/gpu/test/TestUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/Timer.h>
#include <gflags/gflags.h>
#include <map>
#include <memory>
#include <vector>
#include <cuda_profiler_api.h>
DEFINE_int32(nprobe, 5, "number of coarse centroids to probe");
DEFINE_int32(k, 3, "final number of closest results returned");
DEFINE_int32(num_queries, 3, "number of query vectors");
DEFINE_string(in, "/home/jhj/local/index.out", "index file for input");
DEFINE_bool(diff, true, "show exact distance + index output discrepancies");
DEFINE_bool(use_float16_coarse, false, "coarse quantizer in float16");
DEFINE_int64(seed, -1, "specify random seed");
DEFINE_int32(num_gpus, 1, "number of gpus to use");
DEFINE_int32(index, 2, "0 = no indices on GPU; 1 = 32 bit, 2 = 64 bit on GPU");
using namespace faiss::gpu;
// Perf/correctness harness for GpuIndexIVFFlat: loads an IVFFlat index from
// disk, clones it onto FLAGS_num_gpus GPU(s), runs the same random k-NN query
// batch through the CPU index and the GPU index, prints both wall-clock
// times and compares the two result lists.
int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
// Keep profiling off until just before the GPU search of interest.
cudaProfilerStop();
auto seed = FLAGS_seed != -1L ? FLAGS_seed : time(nullptr);
printf("using seed %ld\n", seed);
auto numQueries = FLAGS_num_queries;
// Load the CPU index; dynamic_cast yields nullptr (caught by the assert
// below) if the file does not contain an IndexIVFFlat.
auto index = std::unique_ptr<faiss::IndexIVFFlat>(
dynamic_cast<faiss::IndexIVFFlat*>(faiss::read_index(FLAGS_in.c_str())));
FAISS_ASSERT((bool) index);
index->nprobe = FLAGS_nprobe;
auto dim = index->d;
printf("Database: dim %d num vecs %ld\n", dim, index->ntotal);
printf("Coarse centroids: %ld\n", index->quantizer->ntotal);
printf("L2 lookup: %d queries, nprobe %d, total k %d\n",
numQueries, FLAGS_nprobe, FLAGS_k);
printf("float16 coarse quantizer %s\n",
FLAGS_use_float16_coarse ? "enabled" : "disabled");
// Convert to GPU index
printf("Copying index to %d GPU(s)...\n", FLAGS_num_gpus);
// Factory used by IndexWrapper to build one GPU clone of the index per device.
auto initFn = [&index](faiss::gpu::GpuResourcesProvider* res, int dev) ->
std::unique_ptr<faiss::gpu::GpuIndexIVFFlat> {
GpuIndexIVFFlatConfig config;
config.device = dev;
config.indicesOptions = (faiss::gpu::IndicesOptions) FLAGS_index;
config.flatConfig.useFloat16 = FLAGS_use_float16_coarse;
auto p = std::unique_ptr<faiss::gpu::GpuIndexIVFFlat>(
new faiss::gpu::GpuIndexIVFFlat(res,
index->d,
index->nlist,
index->metric_type,
config));
p->copyFrom(index.get());
return p;
};
IndexWrapper<faiss::gpu::GpuIndexIVFFlat> gpuIndex(FLAGS_num_gpus, initFn);
gpuIndex.setNumProbes(FLAGS_nprobe);
printf("copy done\n");
// Build query vectors
HostTensor<float, 2, true> cpuQuery({numQueries, dim});
faiss::float_rand(cpuQuery.data(), cpuQuery.numElements(), seed);
// Time faiss CPU
HostTensor<float, 2, true> cpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> cpuIndices({numQueries, FLAGS_k});
float cpuTime = 0.0f;
{
CpuTimer timer;
index->search(numQueries,
cpuQuery.data(),
FLAGS_k,
cpuDistances.data(),
cpuIndices.data());
cpuTime = timer.elapsedMilliseconds();
}
printf("CPU time %.3f ms\n", cpuTime);
HostTensor<float, 2, true> gpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> gpuIndices({numQueries, FLAGS_k});
// Profile only the GPU search; drain all devices first so the timed region
// does not include leftover clone/copy work.
CUDA_VERIFY(cudaProfilerStart());
faiss::gpu::synchronizeAllDevices();
float gpuTime = 0.0f;
// Time GPU
{
CpuTimer timer;
gpuIndex.getIndex()->search(cpuQuery.getSize(0),
cpuQuery.data(),
FLAGS_k,
gpuDistances.data(),
gpuIndices.data());
// There is a device -> host copy above, so no need to time
// additional synchronization with the GPU
gpuTime = timer.elapsedMilliseconds();
}
CUDA_VERIFY(cudaProfilerStop());
printf("GPU time %.3f ms\n", gpuTime);
// Side-by-side comparison of CPU and GPU result lists; prints per-entry
// discrepancies when FLAGS_diff is set.
compareLists(cpuDistances.data(), cpuIndices.data(),
gpuDistances.data(), gpuIndices.data(),
numQueries, FLAGS_k,
"", true, FLAGS_diff, false);
CUDA_VERIFY(cudaDeviceSynchronize());
return 0;
}
|
923c933540459da8575fd077dd162102440c5470.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ReduceRowMajor3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Auto-generated benchmark driver: for each of argv[1] matrix sizes from
// matrices_ and each of the 20 block shapes in blocks_, times 1000 launches
// of the `ReduceRowMajor3` kernel and prints timing + configuration.
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
// FIX: allocate sizeof(int) per element.  The original passed XSIZE*YSIZE
// *bytes*, i.e. only a quarter of the storage needed for `size` ints.
hipMalloc(&g_idata, sizeof(int)*XSIZE*YSIZE);
int *g_odata = NULL;
hipMalloc(&g_odata, sizeof(int)*XSIZE*YSIZE);
int size = XSIZE*YSIZE;
// Round the problem size up so gridDim*blockDim covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces lazy context creation before anything is timed.
hipFree(0);hipLaunchKernelGGL((
ReduceRowMajor3), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,size);
hipDeviceSynchronize();
// Warm-up launches, excluded from the timed region.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
ReduceRowMajor3), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,size);
}
// NOTE(review): no device sync before `end`, so this mostly measures launch
// overhead — presumably intentional for this generated harness.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
ReduceRowMajor3), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// FIX: release the device buffers for this configuration (were leaked).
hipFree(g_idata);
hipFree(g_odata);
}
}} | 923c933540459da8575fd077dd162102440c5470.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ReduceRowMajor3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Auto-generated benchmark driver: for each of argv[1] matrix sizes from
// matrices_ and each of the 20 block shapes in blocks_, times 1000 launches
// of the `ReduceRowMajor3` kernel and prints timing + configuration.
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
// FIX: allocate sizeof(int) per element.  The original passed XSIZE*YSIZE
// *bytes*, i.e. only a quarter of the storage needed for `size` ints.
cudaMalloc(&g_idata, sizeof(int)*XSIZE*YSIZE);
int *g_odata = NULL;
cudaMalloc(&g_odata, sizeof(int)*XSIZE*YSIZE);
int size = XSIZE*YSIZE;
// Round the problem size up so gridDim*blockDim covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) forces lazy context creation before anything is timed.
cudaFree(0);
ReduceRowMajor3<<<gridBlock,threadBlock>>>(g_idata,g_odata,size);
cudaDeviceSynchronize();
// Warm-up launches, excluded from the timed region.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ReduceRowMajor3<<<gridBlock,threadBlock>>>(g_idata,g_odata,size);
}
// NOTE(review): no device sync before `end`, so this mostly measures launch
// overhead — presumably intentional for this generated harness.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ReduceRowMajor3<<<gridBlock,threadBlock>>>(g_idata,g_odata,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// FIX: release the device buffers for this configuration (were leaked).
cudaFree(g_idata);
cudaFree(g_odata);
}
}}
d04de9d47251fd6e5548e9002f06959deb2f395d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <time.h>
#include <vector>
#define QUEENS (14)
// Counts N-queens solutions on a QUEENS x QUEENS board.
// Launch geometry: grid (QUEENS, QUEENS) x block (QUEENS, QUEENS); the four
// launch coordinates fix the queen columns of board rows 3..6.  Rows 0..2
// come from the host-filtered seed list `frontQueensPos` (numFQP[0] ints,
// i.e. numFQP[0]/3 seeds of three columns each); rows 7..QUEENS-1 are
// completed by an iterative backtracking search.  Each surviving thread
// writes its solution count to data[thisThread]; threads whose rows 3..6
// already conflict return early and leave their data[] slot untouched
// (the host pre-zeroes the buffer).
__global__ void countQueens(int* frontQueensPos, int* data, int* numFQP)
{
	int localResult = 0;
	// Flat thread id.  The formula mixes gridDim.y with blockDim.x, but it
	// remains a bijection here because every grid/block dimension equals
	// QUEENS, so each thread gets a unique slot in data[].
	int thisThread = ((blockIdx.x * gridDim.x + blockIdx.y) * gridDim.y + threadIdx.x) * blockDim.x + threadIdx.y;
	if (blockIdx.x >= QUEENS || blockIdx.y >= QUEENS || threadIdx.x >= QUEENS || threadIdx.y >= QUEENS)
		return;
	// FIX: use a fixed-size local array instead of device-side `new`.
	// The original `new int[QUEENS]` was never delete[]d — it leaked heap on
	// every early return and on normal exit.  QUEENS is a compile-time
	// constant, so a plain array is correct and avoids per-thread malloc.
	int queenPos[QUEENS];
	queenPos[3] = blockIdx.x;
	queenPos[4] = blockIdx.y;
	queenPos[5] = threadIdx.x;
	queenPos[6] = threadIdx.y;
	// Reject threads whose rows 3..6 attack each other (same column or
	// same diagonal).
	for (int i = 4; i <= 6; i++) {
		for (int j = 3; j < i; j++) {
			if ((queenPos[i] - i) == (queenPos[j] - j) || (queenPos[i] + i) == (queenPos[j] + j) || queenPos[i] == queenPos[j]) {
				return;
			}
		}
	}
	int totalFQP = numFQP[0] / 3;
	for (int FQP_number = 0; FQP_number < totalFQP; FQP_number++) {
		// Load the seed columns for rows 0..2.
		for (int i = 0; i < 3; i++)
			queenPos[i] = frontQueensPos[(FQP_number * 3) + i];
		bool legal = true;
		// Check rows 3..6 against the seed rows 0..2.
		for (int i = 3; i <= 6; i++) {
			for (int j = 0; j < 3; j++) {
				if ((queenPos[i] - i) == (queenPos[j] - j) || (queenPos[i] + i) == (queenPos[j] + j) || queenPos[i] == queenPos[j]) {
					legal = false;
					break;
				}
			}
			if (!legal)
				break;
		}
		if (!legal)
			continue;
		// Iterative backtracking over rows 7..QUEENS-1.
		int posNow = 7;
		queenPos[posNow] = -1;
		while (posNow > 6) {
			queenPos[posNow]++;
			// Advance to the next non-attacking column in this row.
			while (queenPos[posNow] < QUEENS) {
				legal = true;
				for (int j = posNow - 1; j >= 0; j--) {
					if ((queenPos[posNow] - posNow) == (queenPos[j] - j) || (queenPos[posNow] + posNow) == (queenPos[j] + j) || queenPos[posNow] == queenPos[j]) {
						legal = false;
						break;
					}
				}
				if (!legal)
					queenPos[posNow]++;
				else
					break;
			}
			if (queenPos[posNow] < QUEENS) {
				if (posNow == (QUEENS - 1)) {
					// Queen placed in the last row: one complete solution.
					// (At most one column can be free in the last row, so
					// backtracking immediately is safe.)
					localResult++;
					posNow--;
				}
				else {
					posNow++;
					queenPos[posNow] = -1;
				}
			}
			else
				posNow--;   // row exhausted: backtrack
		}
	}
	data[thisThread] = localResult;
}
// Zero the per-thread result buffer (QUEENS^4 counters).
__host__ void initData(int* data) {
	const int total = QUEENS * QUEENS * QUEENS * QUEENS;
	int* end = data + total;
	while (data != end)
		*data++ = 0;
}
// Host driver: counts N-queens solutions for seeds in [seedLower, seedUpper).
// A seed i encodes the queen columns of board rows 0..2 as the base-QUEENS
// digits of i.  Seeds whose three queens already attack each other are
// filtered out on the host; the survivors are uploaded and the countQueens
// kernel (grid and block both QUEENS x QUEENS) fixes rows 3..6 from its
// launch coordinates and backtracks the remaining rows.  Returns the total
// solution count and prints it with coarse timing of the three phases
// (host filtering + upload, GPU work, host reduction).
__host__ int NQueens(int seedLower, int seedUpper)
{
	clock_t start, mid1, mid2, end;
	int* d_FQP;
	std::vector <int> frontQueenPosV;
	int *frontQueenPos;
	int *tempFrontQueensPos = new int[3];
	int* d_data;
	// Per-thread result buffer: QUEENS^4 ints (~150 KB on the stack for
	// QUEENS == 14).
	int data[QUEENS*QUEENS*QUEENS*QUEENS];
	int totalResult = 0;
	initData(data);
	int seedFrom = seedLower;
	int seedTo = seedUpper;
	start = clock();
	// Clamp the requested seed range to [0, QUEENS^3).
	if (seedTo < seedFrom) {
		delete [] tempFrontQueensPos;	// FIX: was leaked on this early exit
		return 0;
	}
	if (seedTo > QUEENS * QUEENS * QUEENS)
		seedTo = QUEENS * QUEENS * QUEENS;
	if (seedFrom < 0)
		seedFrom = 0;
	// Host-side pre-filter: keep only seeds whose rows 0..2 share neither a
	// column nor a diagonal.
	for (int i = seedFrom; i < seedTo; i++) {
		tempFrontQueensPos[0] = i / QUEENS / QUEENS;
		tempFrontQueensPos[1] = i / QUEENS % QUEENS;
		tempFrontQueensPos[2] = i % QUEENS;
		if ((tempFrontQueensPos[0] - 0) == (tempFrontQueensPos[1] - 1) || (tempFrontQueensPos[0] + 0) == (tempFrontQueensPos[1] + 1) || tempFrontQueensPos[0] == tempFrontQueensPos[1])
			continue;
		if ((tempFrontQueensPos[2] - 2) == (tempFrontQueensPos[1] - 1) || (tempFrontQueensPos[2] + 2) == (tempFrontQueensPos[1] + 1) || tempFrontQueensPos[2] == tempFrontQueensPos[1])
			continue;
		if ((tempFrontQueensPos[0] - 0) == (tempFrontQueensPos[2] - 2) || (tempFrontQueensPos[0] + 0) == (tempFrontQueensPos[2] + 2) || tempFrontQueensPos[0] == tempFrontQueensPos[2])
			continue;
		frontQueenPosV.push_back(tempFrontQueensPos[0]);
		frontQueenPosV.push_back(tempFrontQueensPos[1]);
		frontQueenPosV.push_back(tempFrontQueensPos[2]);
	}
	delete [] tempFrontQueensPos;	// FIX: scratch triple no longer needed (was leaked)
	if (frontQueenPosV.empty())
		return 0;	// every seed in range was illegal
	frontQueenPos = new int[frontQueenPosV.size()];
	memcpy(frontQueenPos, &frontQueenPosV[0], frontQueenPosV.size() * sizeof(int));
	int numFQP = frontQueenPosV.size();
	int* d_numFQP;
	// Upload the zeroed result buffer and the seed list.
	hipMalloc((void**)&d_data, QUEENS*QUEENS*QUEENS*QUEENS * sizeof(int));
	hipMalloc((void**)&d_FQP, frontQueenPosV.size() * sizeof(int));
	hipMalloc((void**)&d_numFQP, sizeof(int));
	hipMemcpy(d_data, data, QUEENS*QUEENS*QUEENS*QUEENS * sizeof(int), hipMemcpyHostToDevice);
	hipMemcpy(d_FQP, frontQueenPos, frontQueenPosV.size() * sizeof(int), hipMemcpyHostToDevice);
	hipMemcpy(d_numFQP, &numFQP, sizeof(int), hipMemcpyHostToDevice);
	dim3 blocksPerGrid(QUEENS, QUEENS, 1);
	dim3 threadsPerBlock(QUEENS, QUEENS, 1);
	// End of setup (the original assigned mid1 twice; the first, dead
	// assignment before the mallocs has been removed).
	mid1 = clock();
	hipLaunchKernelGGL(( countQueens) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_FQP, d_data, d_numFQP);
	// The blocking copy below also synchronises with the kernel.
	hipMemcpy(data, d_data, QUEENS*QUEENS*QUEENS*QUEENS * sizeof(int), hipMemcpyDeviceToHost);
	hipDeviceSynchronize();
	mid2 = clock();
	// Host reduction of the per-thread counts.
	for (int dNum = 0; dNum < QUEENS*QUEENS*QUEENS*QUEENS; dNum++)
		totalResult += data[dNum];
	hipFree(d_data);
	hipFree(d_FQP);
	hipFree(d_numFQP);	// FIX: was never freed
	delete [] frontQueenPos;	// FIX: was never freed
	end = clock();
	// FIX: cast clock_t differences explicitly — passing clock_t to %d is
	// undefined where clock_t is wider than int.
	printf("%d__%d, %d, %d\n", totalResult, (int)(mid1 - start), (int)(mid2 - mid1), (int)(end - mid2));
	return totalResult;
} | d04de9d47251fd6e5548e9002f06959deb2f395d.cu | #include <cstdio>
#include <time.h>
#include <vector>
#define QUEENS (14)
// Counts N-queens solutions on a QUEENS x QUEENS board.
// Launch geometry: grid (QUEENS, QUEENS) x block (QUEENS, QUEENS); the four
// launch coordinates fix the queen columns of board rows 3..6.  Rows 0..2
// come from the host-filtered seed list `frontQueensPos` (numFQP[0] ints,
// i.e. numFQP[0]/3 seeds of three columns each); rows 7..QUEENS-1 are
// completed by an iterative backtracking search.  Each surviving thread
// writes its solution count to data[thisThread]; threads whose rows 3..6
// already conflict return early and leave their data[] slot untouched
// (the host pre-zeroes the buffer).
__global__ void countQueens(int* frontQueensPos, int* data, int* numFQP)
{
	int localResult = 0;
	// Flat thread id.  The formula mixes gridDim.y with blockDim.x, but it
	// remains a bijection here because every grid/block dimension equals
	// QUEENS, so each thread gets a unique slot in data[].
	int thisThread = ((blockIdx.x * gridDim.x + blockIdx.y) * gridDim.y + threadIdx.x) * blockDim.x + threadIdx.y;
	if (blockIdx.x >= QUEENS || blockIdx.y >= QUEENS || threadIdx.x >= QUEENS || threadIdx.y >= QUEENS)
		return;
	// FIX: use a fixed-size local array instead of device-side `new`.
	// The original `new int[QUEENS]` was never delete[]d — it leaked heap on
	// every early return and on normal exit.  QUEENS is a compile-time
	// constant, so a plain array is correct and avoids per-thread malloc.
	int queenPos[QUEENS];
	queenPos[3] = blockIdx.x;
	queenPos[4] = blockIdx.y;
	queenPos[5] = threadIdx.x;
	queenPos[6] = threadIdx.y;
	// Reject threads whose rows 3..6 attack each other (same column or
	// same diagonal).
	for (int i = 4; i <= 6; i++) {
		for (int j = 3; j < i; j++) {
			if ((queenPos[i] - i) == (queenPos[j] - j) || (queenPos[i] + i) == (queenPos[j] + j) || queenPos[i] == queenPos[j]) {
				return;
			}
		}
	}
	int totalFQP = numFQP[0] / 3;
	for (int FQP_number = 0; FQP_number < totalFQP; FQP_number++) {
		// Load the seed columns for rows 0..2.
		for (int i = 0; i < 3; i++)
			queenPos[i] = frontQueensPos[(FQP_number * 3) + i];
		bool legal = true;
		// Check rows 3..6 against the seed rows 0..2.
		for (int i = 3; i <= 6; i++) {
			for (int j = 0; j < 3; j++) {
				if ((queenPos[i] - i) == (queenPos[j] - j) || (queenPos[i] + i) == (queenPos[j] + j) || queenPos[i] == queenPos[j]) {
					legal = false;
					break;
				}
			}
			if (!legal)
				break;
		}
		if (!legal)
			continue;
		// Iterative backtracking over rows 7..QUEENS-1.
		int posNow = 7;
		queenPos[posNow] = -1;
		while (posNow > 6) {
			queenPos[posNow]++;
			// Advance to the next non-attacking column in this row.
			while (queenPos[posNow] < QUEENS) {
				legal = true;
				for (int j = posNow - 1; j >= 0; j--) {
					if ((queenPos[posNow] - posNow) == (queenPos[j] - j) || (queenPos[posNow] + posNow) == (queenPos[j] + j) || queenPos[posNow] == queenPos[j]) {
						legal = false;
						break;
					}
				}
				if (!legal)
					queenPos[posNow]++;
				else
					break;
			}
			if (queenPos[posNow] < QUEENS) {
				if (posNow == (QUEENS - 1)) {
					// Queen placed in the last row: one complete solution.
					// (At most one column can be free in the last row, so
					// backtracking immediately is safe.)
					localResult++;
					posNow--;
				}
				else {
					posNow++;
					queenPos[posNow] = -1;
				}
			}
			else
				posNow--;   // row exhausted: backtrack
		}
	}
	data[thisThread] = localResult;
}
// Zero the per-thread result buffer (QUEENS^4 counters).
__host__ void initData(int* data) {
	const int total = QUEENS * QUEENS * QUEENS * QUEENS;
	int* end = data + total;
	while (data != end)
		*data++ = 0;
}
// Host driver: counts N-queens solutions for seeds in [seedLower, seedUpper).
// A seed i encodes the queen columns of board rows 0..2 as the base-QUEENS
// digits of i.  Seeds whose three queens already attack each other are
// filtered out on the host; the survivors are uploaded and the countQueens
// kernel (grid and block both QUEENS x QUEENS) fixes rows 3..6 from its
// launch coordinates and backtracks the remaining rows.  Returns the total
// solution count and prints it with coarse timing of the three phases
// (host filtering + upload, GPU work, host reduction).
__host__ int NQueens(int seedLower, int seedUpper)
{
	clock_t start, mid1, mid2, end;
	int* d_FQP;
	std::vector <int> frontQueenPosV;
	int *frontQueenPos;
	int *tempFrontQueensPos = new int[3];
	int* d_data;
	// Per-thread result buffer: QUEENS^4 ints (~150 KB on the stack for
	// QUEENS == 14).
	int data[QUEENS*QUEENS*QUEENS*QUEENS];
	int totalResult = 0;
	initData(data);
	int seedFrom = seedLower;
	int seedTo = seedUpper;
	start = clock();
	// Clamp the requested seed range to [0, QUEENS^3).
	if (seedTo < seedFrom) {
		delete [] tempFrontQueensPos;	// FIX: was leaked on this early exit
		return 0;
	}
	if (seedTo > QUEENS * QUEENS * QUEENS)
		seedTo = QUEENS * QUEENS * QUEENS;
	if (seedFrom < 0)
		seedFrom = 0;
	// Host-side pre-filter: keep only seeds whose rows 0..2 share neither a
	// column nor a diagonal.
	for (int i = seedFrom; i < seedTo; i++) {
		tempFrontQueensPos[0] = i / QUEENS / QUEENS;
		tempFrontQueensPos[1] = i / QUEENS % QUEENS;
		tempFrontQueensPos[2] = i % QUEENS;
		if ((tempFrontQueensPos[0] - 0) == (tempFrontQueensPos[1] - 1) || (tempFrontQueensPos[0] + 0) == (tempFrontQueensPos[1] + 1) || tempFrontQueensPos[0] == tempFrontQueensPos[1])
			continue;
		if ((tempFrontQueensPos[2] - 2) == (tempFrontQueensPos[1] - 1) || (tempFrontQueensPos[2] + 2) == (tempFrontQueensPos[1] + 1) || tempFrontQueensPos[2] == tempFrontQueensPos[1])
			continue;
		if ((tempFrontQueensPos[0] - 0) == (tempFrontQueensPos[2] - 2) || (tempFrontQueensPos[0] + 0) == (tempFrontQueensPos[2] + 2) || tempFrontQueensPos[0] == tempFrontQueensPos[2])
			continue;
		frontQueenPosV.push_back(tempFrontQueensPos[0]);
		frontQueenPosV.push_back(tempFrontQueensPos[1]);
		frontQueenPosV.push_back(tempFrontQueensPos[2]);
	}
	delete [] tempFrontQueensPos;	// FIX: scratch triple no longer needed (was leaked)
	if (frontQueenPosV.empty())
		return 0;	// every seed in range was illegal
	frontQueenPos = new int[frontQueenPosV.size()];
	memcpy(frontQueenPos, &frontQueenPosV[0], frontQueenPosV.size() * sizeof(int));
	int numFQP = frontQueenPosV.size();
	int* d_numFQP;
	// Upload the zeroed result buffer and the seed list.
	cudaMalloc((void**)&d_data, QUEENS*QUEENS*QUEENS*QUEENS * sizeof(int));
	cudaMalloc((void**)&d_FQP, frontQueenPosV.size() * sizeof(int));
	cudaMalloc((void**)&d_numFQP, sizeof(int));
	cudaMemcpy(d_data, data, QUEENS*QUEENS*QUEENS*QUEENS * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_FQP, frontQueenPos, frontQueenPosV.size() * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_numFQP, &numFQP, sizeof(int), cudaMemcpyHostToDevice);
	dim3 blocksPerGrid(QUEENS, QUEENS, 1);
	dim3 threadsPerBlock(QUEENS, QUEENS, 1);
	// End of setup (the original assigned mid1 twice; the first, dead
	// assignment before the mallocs has been removed).
	mid1 = clock();
	countQueens <<< blocksPerGrid, threadsPerBlock >>> (d_FQP, d_data, d_numFQP);
	// The blocking copy below also synchronises with the kernel.
	cudaMemcpy(data, d_data, QUEENS*QUEENS*QUEENS*QUEENS * sizeof(int), cudaMemcpyDeviceToHost);
	cudaDeviceSynchronize();
	mid2 = clock();
	// Host reduction of the per-thread counts.
	for (int dNum = 0; dNum < QUEENS*QUEENS*QUEENS*QUEENS; dNum++)
		totalResult += data[dNum];
	cudaFree(d_data);
	cudaFree(d_FQP);
	cudaFree(d_numFQP);	// FIX: was never freed
	delete [] frontQueenPos;	// FIX: was never freed
	end = clock();
	// FIX: cast clock_t differences explicitly — passing clock_t to %d is
	// undefined where clock_t is wider than int.
	printf("%d__%d, %d, %d\n", totalResult, (int)(mid1 - start), (int)(mid2 - mid1), (int)(end - mid2));
	return totalResult;
}
dd8518778fc631c5fc1cb6f4a65cb5468a8b9e20.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample is a templatized version of the template project.
* It also shows how to correctly templatize dynamically allocated shared
* memory arrays.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
#include <windows.h>
#endif
#include <iostream>
#include <fstream>
#include <string>
#include <time.h>
#include <assert.h>
// includes, project
#include <cutil_inline.h>
#include <shrQATest.h>
// includes for cuda auto-complete suport
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// includes, kernels
#include "simpleTemplates_kernel.cu"
// include timer function
#include "time_ms_64.hpp"
/////////////////////////////////////////////////////////////////////////////////
// Includes / Declerations for AES GPU code
/////////////////////////////////////////////////////////////////////////////////
#include "Rijndael_GPU.cuh"
#define _GPU
//#define _SLOW
/////////////////////////////////////////////////////////////////////////////////
// Includes / Declerations for AES host code
/////////////////////////////////////////////////////////////////////////////////
#include "Rijndael.h"
#include "AES.h"
using namespace std;
//#define __TimingTests
// define this to test old direct slow method, else remove for fast method
//#define _SLOW
#define RANDOM_TEST_COUNT 50 // how many random tests to do
////////////////////////////////////////////////////////////////////////////////
#ifdef _SLOW
#define CRYPT Rijndael
#else
#ifdef _GPU
#define CRYPT Rijndael_GPU
#endif
#endif
// One AES/Rijndael test case.  All fields are hex text, two characters per
// byte; the key string's length selects the key size (TestVector derives
// keylen from strlen(key)/2).
typedef struct
{
char * key;          // cipher key, hex text
char * plaintext;    // input block, hex text
char * ciphertext;   // expected encrypted block, hex text
char * e_vectors[9]; // expected intermediate values while encrypting (presumably one per round — see NIST vector comments)
char * d_vectors[9]; // expected intermediate values while decrypting
} test_t;
// todo - add all checks from the NIST document in the header comment
// Known-answer test vectors. Only the first (NIST) entry supplies the
// per-round intermediate states; the remaining entries zero them out and
// are checked for the final ciphertext only.
test_t vectors[] = {
// a test vector from NIST (includes round-by-round states)
{ "000102030405060708090A0B0C0D0E0F", // key
"000102030405060708090A0B0C0D0E0F", // plaintext
"0A940BB5416EF045F1C39458C653EA5A", // ciphertext
{"B5C9179EB1CC1199B9C51B92B5C8159D", // encryption vectors (rounds 1..9)
"2B65F6374C427C5B2FE3A9256896755B",
"D1015FCBB4EF65679688462076B9D6AD",
"8E17064A2A35A183729FE59FF3A591F1",
"D7557DD55999DB3259E2183D558DCDD2",
"73A96A5D7799A5F3111D2B63684B1F7F",
"1B6B853069EEFC749AFEFD7B57A04CD1",
"107EEADFB6F77933B5457A6F08F046B2",
"8EC166481A677AA96A14FF6ECE88C010"},
{"8EC166481A677AA96A14FF6ECE88C010", // decryption vectors (rounds 1..9)
"107EEADFB6F77933B5457A6F08F046B2",
"1B6B853069EEFC749AFEFD7B57A04CD1",
"73A96A5D7799A5F3111D2B63684B1F7F",
"D7557DD55999DB3259E2183D558DCDD2",
"8E17064A2A35A183729FE59FF3A591F1",
"D1015FCBB4EF65679688462076B9D6AD",
"2B65F6374C427C5B2FE3A9256896755B",
"B5C9179EB1CC1199B9C51B92B5C8159D"}},
// additional key-size / key-pattern cases (ciphertext check only)
{"00010203050607080A0B0C0D0F101112",            // 128-bit key
"506812A45F08C889B97F5980038B8359",
"D8F532538289EF7D06B506A4FD5BE9C9", {0},{0}},
{"00010203050607080A0B0C0D0F10111214151617191A1B1C", // 192-bit key
"2D33EEF2C0430A8A9EBF45E809C40BB6",
"DFF4945E0336DF4C1C56BC700EFF837F", {0},{0}},
{"50515253555657585A5B5C5D5F60616264656667696A6B6C6E6F707173747576", // 256-bit key
"050407067477767956575051221D1C1F",
"7444527095838FE080FC2BCDD30847EB", {0},{0}},
{"000000000000000000000000000000000200000000000000", // sparse 192-bit key
"00000000000000000000000000000000",
"5D989E122B78C758921EDBEEB827F0C0",{0},{0}},
};
/////////////////////////////////////////////////////////////////////////////////
// Number of failed result comparisons, incremented by runTest below.
int g_TotalFailures = 0;
// forward declaration: templated GPU test driver (defined at end of file)
template <class T>
void runTest( int argc, char** argv, int len);
// Host-side reference computation: scales every input element by the
// array length, mirroring what the GPU kernel is expected to produce.
// reference : output array of `len` elements
// idata     : input array of `len` elements
// len       : element count (also the scale factor)
template<class T>
void computeGold( T* reference, T* idata, const unsigned int len)
{
    const T scale = static_cast<T>(len);
    for (unsigned int idx = 0; idx != len; ++idx)
        reference[idx] = idata[idx] * scale;
}
////////////////////////////////////////////////////////////////////////////////
// aes main methods
////////////////////////////////////////////////////////////////////////////////
// Converts one ASCII hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its value.
static int HexNibble(int c)
{
    if (c > '9')
        return toupper(c) - 'A' + 10;
    return c - '0';
}

// Converts an ASCII hex string (e.g. "0A94") into raw bytes.
// in   : NUL-terminated hex string, case-insensitive, two digits per byte
// data : output buffer; must hold strlen(in)/2 bytes
// Fix: the original read a second `*in++` without checking for the
// terminator, so an odd-length string consumed the NUL and kept walking
// past the end of the input. A dangling final nibble is now ignored.
void TextToHex(const char * in, char * data)
{
    while (*in)
    {
        int hi = HexNibble(*in++);
        if (*in == 0)
            break; // odd-length input: drop the incomplete trailing byte
        int lo = HexNibble(*in++);
        *data++ = static_cast<char>(hi * 16 + lo);
    }
} // TextToHex
// test a given test vector, see that internals are working
// return false iff fails
// Runs one known-answer test: encrypts the vector's plaintext and compares
// against the expected ciphertext, then decrypts the ciphertext and
// compares against the plaintext.
// vector     : hex-encoded key/plaintext/ciphertext (+ optional round states)
// use_states : when true (only meaningful in _SLOW builds), the per-round
//              encryption states are passed to the block routines as well
// Returns true iff both directions match.
bool TestVector(const test_t & vector, bool use_states)
{
bool retval = true; // assume passes
// data sizes in bytes (hex strings hold two characters per byte)
int keylen = strlen(vector.key)/2, blocklen = strlen(vector.plaintext)/2;
CRYPT crypt;
crypt.SetParameters(keylen*8,blocklen*8); // API takes sizes in bits
unsigned char key[32], plaintext[32],ciphertext[32],temptext[32];
unsigned char states[4096*20]; // scratch for per-round states (_SLOW path)
TextToHex(vector.key,reinterpret_cast<char*>(key));
TextToHex(vector.ciphertext,reinterpret_cast<char*>(ciphertext));
TextToHex(vector.plaintext,reinterpret_cast<char*>(plaintext));
if (use_states == true)
for (int pos = 0; pos < 9; pos++) // decode 9 rounds of 16-byte states
TextToHex(vector.e_vectors[pos],reinterpret_cast<char*>(states)+pos*16);
crypt.StartEncryption(key);
#ifdef _SLOW
if (use_states == true)
crypt.EncryptBlock(plaintext,temptext,states);
else
#endif
crypt.EncryptBlock(plaintext,temptext);
// check that temp = cipher
if (memcmp(ciphertext,temptext,blocklen) != 0)
{
cout << "Error: encryption error\n";
retval = false;
}
else
cout << "Encryption passed\n";
crypt.StartDecryption(key);
#ifdef _SLOW
if (use_states == true)
crypt.DecryptBlock(ciphertext,temptext,states);
else
#endif
crypt.DecryptBlock(ciphertext,temptext);
// check that decrypting the ciphertext recovers the plaintext
if (memcmp(plaintext,temptext,blocklen) != 0)
{
cout << "Error: decryption error\n";
retval = false;
}
else
cout << "Decryption passed\n";
return retval;
} // TestVector
// return false iff 2 byte end values not preserved
// Verifies the 0xBE,0xEF sentinel bytes placed at both ends of a guarded
// buffer. Expected layout: [BE EF | length payload bytes | BE EF].
// Returns true iff all four guard bytes are intact (no overrun occurred).
bool CheckBuffer(const unsigned char * buf, int length)
{
    const unsigned char * tail = buf + length + 2;
    const bool headOk = (buf[0] == 0xBE) && (buf[1] == 0xEF);
    const bool tailOk = (tail[0] == 0xBE) && (tail[1] == 0xEF);
    return headOk && tailOk;
} // CheckBuffer
// return false iff fails
// One randomized round-trip test: pick a random key size and data length,
// encrypt then decrypt, and verify both the recovered plaintext and the
// 0xBE,0xEF guard bytes placed around every working buffer.
// pos : test index (used only for the progress message; reused as a loop
//       variable afterwards)
// Returns false iff the round trip or a guard-byte check fails.
bool RandomTest(int pos)
{
// data sizes in bytes
int keylen, blocklen,datalen,mode;
keylen = (rand()%3)*8 + 16; // 16, 24 or 32 bytes (128/192/256-bit keys)
//blocklen = (rand()%3)*8 + 16;
blocklen = 16;
//mode = rand()%2; // various chaining modes
mode = 0; // ECB only!!
assert((16 == keylen) || (24 == keylen) || (32 == keylen));
assert((16 == blocklen) || (24 == blocklen) || (32 == blocklen));
#define MAXDATA 4096 // max length of random data
CRYPT crypt;
crypt.SetParameters(keylen*8,blocklen*8);
datalen = rand()%MAXDATA;
// +40 leaves room for block padding plus the guard bytes at each end
unsigned char key[32], plaintext[MAXDATA+40],ciphertext[MAXDATA+40],temptext[MAXDATA+40];
cout << "Test: " << pos+1 << " (keysize,blocksize,datalength): (" << keylen << ',' << blocklen << "," << datalen << ")\n";
for (pos = 0; pos < keylen; pos++)
key[pos] = rand();
// add buffer bytes to each end to catch errors
plaintext[0] = 0xBE; plaintext[1] = 0xEF;
ciphertext[0] = 0xBE; ciphertext[1] = 0xEF;
temptext[0] = 0xBE; temptext[1] = 0xEF;
for (pos = 0; pos < datalen; pos++)
plaintext[pos+2] = rand();
// zero-pad the data up to a whole number of blocks
int padlen = blocklen - (datalen%blocklen);
for (pos = 0; pos < padlen; pos++)
plaintext[pos+2+datalen] = 0;
// add buffer bytes to each end to catch errors
pos = padlen+2+datalen;
plaintext[pos] = 0xBE; plaintext[pos+1] = 0xEF;
ciphertext[pos] = 0xBE; ciphertext[pos+1] = 0xEF;
temptext[pos] = 0xBE; temptext[pos+1] = 0xEF;
#undef MAXDATA
int blocks = (datalen + blocklen-1)/blocklen;
// round trip: plaintext -> ciphertext -> temptext (skipping the guards)
crypt.StartEncryption(key);
crypt.Encrypt(plaintext+2,ciphertext+2,blocks,static_cast<CRYPT::BlockMode>(mode));
crypt.StartDecryption(key);
crypt.Decrypt(ciphertext+2,temptext+2,blocks,static_cast<CRYPT::BlockMode>(mode));
if (memcmp(plaintext+2,temptext+2,datalen) != 0)
{
cout << "Error: decryption error\n";
return false;
}
else if ((false == CheckBuffer(plaintext,datalen+padlen)) ||
(false == CheckBuffer(temptext,datalen+padlen)) ||
(false == CheckBuffer(ciphertext,datalen+padlen)))
{
cout << "Error: buffer overflow\n";
return false;
}
else
cout << "Decryption passed\n";
return true;
} // RandomTest
// Reads the CPU time-stamp counter by emitting the raw RDTSC opcode
// (bytes 0F 31); the result comes back in EDX:EAX, which MSVC maps onto
// the __int64 return value. MSVC/x86 only: __declspec(naked) suppresses
// prologue/epilogue so the emitted bytes are the entire function body.
static __declspec(naked) __int64 GetCounter()
{ // read time stamp counter in Pentium class CPUs
_asm
{
_emit 0fh // RDTSC opcode, first byte
_emit 31h // RDTSC opcode, second byte
ret;
}
} //
// Measures per-block encrypt/decrypt cost in CPU cycles (via the RDTSC
// based GetCounter), reporting the minimum and the average over `rounds`
// iterations for the given key/block sizes.
// rounds   : number of timed iterations per direction
// keylen   : key length in bytes
// blocklen : block length in bytes
void Timing(int rounds, int keylen, int blocklen)
{
__int64 start1, end1, start2, end2, overhead;
unsigned char key[32],plaintext[32],ciphertext[32];
int pos;
CRYPT crypt;
crypt.SetParameters(keylen*8,blocklen*8); // API takes sizes in bits
srand(0); // make repeatable
for (pos = 0; pos < keylen; pos++)
key[pos] = rand();
for (pos = 0; pos < blocklen; pos++)
plaintext[pos] = rand();
// measure the cost of the timer calls themselves (subtracted below)
start1 = GetCounter();
end1 = GetCounter();
overhead = end1 - start1;
crypt.StartEncryption(key);
unsigned long min_e = 1000000; // best (minimum) cycle count seen so far
double total_e = 0;            // running sum for the average
for (pos = 0; pos < rounds; pos++)
{
#ifdef _GPU
//printf(".");
#endif
start1 = GetCounter();
crypt.EncryptBlock(plaintext,ciphertext);
end1 = GetCounter();
total_e += end1-start1-overhead;
if (min_e > (end1-start1-overhead))
min_e = static_cast<unsigned long>(end1-start1-overhead);
}
cout << "Min cycles per encryption (key,block): (" << keylen*8 << ',' << blocklen*8 << ") ";
cout << min_e << endl;
cout << "Avg cycles per encryption (key,block): (" << keylen*8 << ',' << blocklen*8 << ") ";
cout << total_e/rounds << endl;
crypt.StartDecryption(key);
unsigned long min_d = 1000000; // best (minimum) cycle count seen so far
double total_d = 0;            // running sum for the average
for (pos = 0; pos < rounds; pos++)
{
#ifdef _GPU
//printf(".");
#endif
start2 = GetCounter();
crypt.DecryptBlock(plaintext,ciphertext);
end2 = GetCounter();
total_d += end2-start2-overhead;
if (min_d > (end2-start2-overhead))
min_d = static_cast<unsigned long>(end2-start2-overhead);
}
cout << "Min cycles per decryption (key,block): (" << keylen*8 << ',' << blocklen*8 << ") ";
cout << min_d << endl;
cout << "Avg cycles per decryption (key,block): (" << keylen*8 << ',' << blocklen*8 << ") ";
cout << total_d/rounds << endl;
} // Timing
// test a file encryption
// Round-trip file test: encrypts `path` with the GPU implementation, then
// decrypts with both the CPU and GPU implementations (timing each) and
// writes the results to "<name>_cpu_out.<ext>" / "<name>_gpu_out.<ext>".
// path : input file; assumed to have an extension and use '\\' separators.
void AESEncryptFile(string path)
{
#define CPU_CRYPT Rijndael
#define GPU_CRYPT Rijndael_GPU
    // split path into directory / base name / extension
    string ext = path.substr(path.find_last_of(".") + 1);
    string fname;
    string dir = "";
    size_t pos = path.find_last_of("\\");
    if(pos != std::string::npos){
        fname.assign(path.begin() + pos + 1, path.end() - ext.length() - 1);
        dir.assign(path.begin(),path.begin() + pos + 1);
    } else {
        fname.assign(path.begin(), path.end() - ext.length() - 1);
    }
    string outFileCPU = dir + fname + "_cpu_out." + ext;
    string outFileGPU = dir + fname + "_gpu_out." + ext;
    ifstream ifile(path,ios_base::binary);
    ofstream ofileCPU(outFileCPU,ios_base::binary);
    ofstream ofileGPU(outFileGPU,ios_base::binary);
    // get file size
    ifile.seekg(0,ios_base::end);
    int size,fsize = ifile.tellg();
    ifile.seekg(0,ios_base::beg);
    // round up to a whole number of 16-byte AES blocks
    size = (fsize+15)&(~15);
    char * ibuffer = new char[size];
    char * obuffer = new char[size];
    ifile.read(ibuffer,fsize);
    // zero the pad bytes: the original encrypted uninitialized memory here,
    // making the tail of the output nondeterministic
    memset(ibuffer + fsize, 0, size - fsize);
    GPU_CRYPT gpu_crypt;
    CPU_CRYPT cpu_crypt;
    gpu_crypt.SetParameters(192);
    cpu_crypt.SetParameters(192);
    // a random key is good enough for a round-trip comparison
    unsigned char key[192/8];
    for (size_t kpos = 0; kpos < sizeof(key); ++kpos)
        key[kpos] = rand();
    gpu_crypt.StartEncryption(key);
    cpu_crypt.StartEncryption(key);
    gpu_crypt.Encrypt(reinterpret_cast<const unsigned char*>(ibuffer),reinterpret_cast<unsigned char*>(obuffer),size/16,static_cast<GPU_CRYPT::BlockMode>(0));
    UINT64 cpuTime,gpuTime;
    INT64 start, finish;
    printf("CPU decryption:\n");
    start = GetTimeMs64();
    cpu_crypt.Decrypt(reinterpret_cast<const unsigned char*>(obuffer),reinterpret_cast<unsigned char*>(ibuffer),size/16,static_cast<CPU_CRYPT::BlockMode>(0));
    finish = GetTimeMs64();
    ofileCPU.write(ibuffer,size);
    cpuTime = finish - start;
    // %llu matches the 64-bit timer value; the original's %d was undefined
    // behavior for a UINT64 argument
    printf("CPU processing time: %llu (ms)\n",static_cast<unsigned long long>(cpuTime));
    printf("GPU decryption:\n");
    start = GetTimeMs64();
    // use GPU_CRYPT::BlockMode for consistency (CRYPT aliases it in _GPU builds)
    gpu_crypt.Decrypt(reinterpret_cast<const unsigned char*>(obuffer),reinterpret_cast<unsigned char*>(ibuffer),size/16,static_cast<GPU_CRYPT::BlockMode>(0));
    finish = GetTimeMs64();
    ofileGPU.write(ibuffer,size);
    gpuTime = finish - start;
    printf("GPU processing time: %llu (ms)\n",static_cast<unsigned long long>(gpuTime));
    if (gpuTime > 0) // guard divide-by-zero on sub-millisecond GPU runs
        printf("Total Speedup: %f\n", (double)cpuTime/gpuTime);
    delete [] ibuffer;
    delete [] obuffer;
    ofileCPU.close();
    ofileGPU.close();
    ifile.close();
} // AESEncryptFile
////////////////////////////////////////////////////////////////////////////////
// AES main
////////////////////////////////////////////////////////////////////////////////
// AES test driver: runs the known-answer vectors, optional timing runs,
// the randomized round-trip tests, and (when a readable file argument was
// supplied) the CPU-vs-GPU file encryption comparison.
// argv[1] (optional): path of a file to encrypt/decrypt as a final test.
// Returns 0; exits early with usage text when given too many arguments.
int aes_main(int argc,char** argv)
{
    string filename = "";
    if(argc>2){
        printf("usage: %s [<input-file>]\n",argv[0]);
        exit(0);
    }
    if(argc==2)
        filename.assign(argv[1]);
#ifdef _WIN32
    // to try to prevent windows from interfering too much
    SetPriorityClass(GetCurrentProcess(),HIGH_PRIORITY_CLASS);
    SetThreadPriority(GetCurrentThread(),THREAD_PRIORITY_TIME_CRITICAL);
#endif
    bool allPassed = true; // assume success until a test fails
    // known-answer vectors; only the first entry carries round-state data.
    // (Looping over the array replaces the original's hard-coded indices
    // 0..4, so new vectors are picked up automatically.)
    const size_t vectorCount = sizeof(vectors)/sizeof(vectors[0]);
    for (size_t v = 0; v < vectorCount; ++v)
        allPassed &= TestVector(vectors[v], v == 0);
#ifdef __TimingTests
    // check timings for the three key sizes (128-bit block)
    for (int key = 16; key <= 32; key += 8)
        Timing(100000,key,16);
#endif
    // randomized round-trip tests
    srand(0); // make reproducible
    cout << "Random tests:\n";
    for (int pos = 0; pos < RANDOM_TEST_COUNT; pos++)
    {
        bool passed = RandomTest(pos);
        allPassed &= passed;
        if (passed == false)
            cerr << "Random Test " << pos << " failed\n";
    }
    if (false == allPassed)
        cerr << "ERROR: Some test(s) failed\n";
    else
        cout << "PASSED: All tests passed\n";
    // file encryption test, only if the supplied file is readable
    ifstream ifile(filename);
    if (ifile) {
        ifile.close();
        cout << "\n\nencrypting file test:\n";
        AESEncryptFile(filename);
    }
    cout << "\n\n";
    return 0;
} // aes_main
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point: delegates to the AES test driver. The original
// simpleTemplates harness invocation is kept below, disabled.
int main( int argc, char** argv)
{
    (void)aes_main(argc, argv);
    /*
    shrQAStart(argc, argv);
    printf("> runTest<float,32>\n");
    runTest<float>( argc, argv, 32);
    printf("> runTest<int,64>\n");
    runTest<int>( argc, argv, 64);
    printf("\n[simpleTemplates] -> Test Results: %d Failures\n", g_TotalFailures);
    cutilDeviceReset();
    shrQAFinishExit(argc, (const char **)argv, (g_TotalFailures == 0) ? QA_PASSED : QA_FAILED);
    */
    return 0;
}
// To completely templatize runTest (below) with cutil, we need to use
// template specialization to wrap up CUTIL's array comparison and file writing
// functions for different types.
// Here's the generic wrapper for cutCompare*
// Fallback comparator for element types without a cutil comparison
// routine: prints a diagnostic and always reports failure. Specializations
// below delegate to the real cutil functions.
template<class T>
class ArrayComparator
{
public:
    CUTBoolean compare( const T* reference, T* data, unsigned int len)
    {
        (void)reference; (void)data; (void)len; // unused in the fallback
        fprintf(stderr, "Error: no comparison function implemented for this type\n");
        return CUTFalse;
    }
};
// Here's the specialization for ints:
// int specialization: delegates to cutil's integer array comparison.
template<>
class ArrayComparator<int>
{
public:
CUTBoolean compare( const int* reference, int* data, unsigned int len)
{
return cutComparei(reference, data, len);
}
};
// Here's the specialization for floats:
// float specialization: delegates to cutil's float array comparison.
template<>
class ArrayComparator<float>
{
public:
CUTBoolean compare( const float* reference, float* data, unsigned int len)
{
return cutComparef(reference, data, len);
}
};
// Here's the generic wrapper for cutWriteFile*
// Fallback writer for element types without a cutil file-write routine:
// prints a diagnostic and always reports failure. Specializations below
// delegate to the real cutil functions.
template<class T>
class ArrayFileWriter
{
public:
    CUTBoolean write(const char* filename, T* data, unsigned int len, float epsilon)
    {
        (void)filename; (void)data; (void)len; (void)epsilon; // unused in the fallback
        fprintf(stderr, "Error: no file write function implemented for this type\n");
        return CUTFalse;
    }
};
// Here's the specialization for ints:
// int specialization: delegates to cutil's integer file writer
// (epsilon is reinterpreted as a verbose/compare flag).
template<>
class ArrayFileWriter<int>
{
public:
CUTBoolean write(const char* filename, int* data, unsigned int len, float epsilon)
{
return cutWriteFilei(filename, data, len, epsilon != 0);
}
};
// Here's the specialization for floats:
// float specialization: delegates to cutil's float file writer.
template<>
class ArrayFileWriter<float>
{
public:
CUTBoolean write(const char* filename, float* data, unsigned int len, float epsilon)
{
return cutWriteFilef(filename, data, len, epsilon);
}
};
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Allocates `len` elements of T on host and device, runs testKernel<T>
// over them (a single block of `len` threads with mem_size bytes of
// dynamic shared memory), and validates the device result against the
// computeGold host reference. Honors the "device" and "regression"
// command-line flags.
// NOTE(review): launched as one block, so len must not exceed the device's
// max threads per block.
template<class T>
void runTest( int argc, char** argv, int len)
{
    int devID;
    hipDeviceProp_t deviceProps;
    if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
        devID = cutilDeviceInit(argc, argv);
        if (devID < 0) {
            printf("No CUDA Capable devices found, exiting...\n");
            shrQAFinishExit(argc, (const char **)argv, QA_WAIVED);
        }
    }
    else {
        devID = cutGetMaxGflopsDeviceId();
        hipSetDevice( devID );
    }
    // get number of SMs on this GPU
    cutilSafeCall(hipGetDeviceProperties(&deviceProps, devID));
    printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
    unsigned int timer = 0;
    cutilCheckError( cutCreateTimer( &timer));
    cutilCheckError( cutStartTimer( timer));
    unsigned int num_threads = len;
    // FIX: was sizeof(float), which under-allocates every buffer for
    // element types wider than float (e.g. T = double)
    unsigned int mem_size = sizeof(T) * num_threads;
    // allocate host memory
    T* h_idata = (T*) malloc( mem_size);
    // initialize the memory
    for( unsigned int i = 0; i < num_threads; ++i)
    {
        h_idata[i] = (T) i;
    }
    // allocate device memory
    T* d_idata;
    cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size));
    // copy host memory to device
    cutilSafeCall( hipMemcpy( d_idata, h_idata, mem_size,
                              hipMemcpyHostToDevice) );
    // allocate device memory for result
    T* d_odata;
    cutilSafeCall( hipMalloc( (void**) &d_odata, mem_size));
    // setup execution parameters: one block of num_threads threads
    dim3 grid( 1, 1, 1);
    dim3 threads( num_threads, 1, 1);
    printf("launching kernel...\n");
    // execute the kernel (mem_size bytes of dynamic shared memory)
    testKernel<T><<< grid, threads, mem_size >>>( d_idata, d_odata);
    // check if kernel execution generated an error
    cutilCheckMsg("Kernel execution failed");
    // allocate mem for the result on host side
    T* h_odata = (T*) malloc( mem_size);
    // copy result from device to host (mem_size == sizeof(T)*num_threads)
    cutilSafeCall( hipMemcpy( h_odata, d_odata, mem_size,
                              hipMemcpyDeviceToHost) );
    cutilCheckError( cutStopTimer( timer));
    printf( "Processing time: %f (ms)\n", cutGetTimerValue( timer));
    cutilCheckError( cutDeleteTimer( timer));
    // compute reference solution on the host
    T* reference = (T*) malloc( mem_size);
    computeGold<T>( reference, h_idata, num_threads);
    ArrayComparator<T> comparator;
    ArrayFileWriter<T> writer;
    // check result
    if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
    {
        // write file for regression test
        cutilCheckError( writer.write( "./data/regression.dat",
                                       h_odata, num_threads, 0.0));
    }
    else
    {
        // custom output handling when no regression test running;
        // check if the result matches the expected solution
        CUTBoolean res = comparator.compare( reference, h_odata, num_threads);
        printf( "Compare %s\n\n", (1 == res) ? "OK" : "MISMATCH");
        g_TotalFailures += (1 != res);
    }
    // cleanup memory
    free( h_idata);
    free( h_odata);
    free( reference);
    cutilSafeCall(hipFree(d_idata));
    cutilSafeCall(hipFree(d_odata));
    cutilDeviceReset();
}
| dd8518778fc631c5fc1cb6f4a65cb5468a8b9e20.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample is a templatized version of the template project.
* It also shows how to correctly templatize dynamically allocated shared
* memory arrays.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
#include <windows.h>
#endif
#include <iostream>
#include <fstream>
#include <string>
#include <time.h>
#include <assert.h>
// includes, project
#include <cutil_inline.h>
#include <shrQATest.h>
// includes for cuda auto-complete suport
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// includes, kernels
#include "simpleTemplates_kernel.cu"
// include timer function
#include "time_ms_64.hpp"
/////////////////////////////////////////////////////////////////////////////////
// Includes / Declerations for AES GPU code
/////////////////////////////////////////////////////////////////////////////////
#include "Rijndael_GPU.cuh"
#define _GPU
//#define _SLOW
/////////////////////////////////////////////////////////////////////////////////
// Includes / Declerations for AES host code
/////////////////////////////////////////////////////////////////////////////////
#include "Rijndael.h"
#include "AES.h"
using namespace std;
//#define __TimingTests
// define this to test old direct slow method, else remove for fast method
//#define _SLOW
#define RANDOM_TEST_COUNT 50 // how many random tests to do
////////////////////////////////////////////////////////////////////////////////
#ifdef _SLOW
#define CRYPT Rijndael
#else
#ifdef _GPU
#define CRYPT Rijndael_GPU
#endif
#endif
typedef struct
{
char * key;
char * plaintext;
char * ciphertext;
char * e_vectors[9];
char * d_vectors[9];
} test_t;
// todo - add all checks from the NIST document in the header comment
test_t vectors[] = {
// a test vector from NIST
{ "000102030405060708090A0B0C0D0E0F", // key
"000102030405060708090A0B0C0D0E0F", // plaintext
"0A940BB5416EF045F1C39458C653EA5A", // ciphertext
{"B5C9179EB1CC1199B9C51B92B5C8159D", // encryption vectors
"2B65F6374C427C5B2FE3A9256896755B",
"D1015FCBB4EF65679688462076B9D6AD",
"8E17064A2A35A183729FE59FF3A591F1",
"D7557DD55999DB3259E2183D558DCDD2",
"73A96A5D7799A5F3111D2B63684B1F7F",
"1B6B853069EEFC749AFEFD7B57A04CD1",
"107EEADFB6F77933B5457A6F08F046B2",
"8EC166481A677AA96A14FF6ECE88C010"},
{"8EC166481A677AA96A14FF6ECE88C010", // decryption vectors
"107EEADFB6F77933B5457A6F08F046B2",
"1B6B853069EEFC749AFEFD7B57A04CD1",
"73A96A5D7799A5F3111D2B63684B1F7F",
"D7557DD55999DB3259E2183D558DCDD2",
"8E17064A2A35A183729FE59FF3A591F1",
"D1015FCBB4EF65679688462076B9D6AD",
"2B65F6374C427C5B2FE3A9256896755B",
"B5C9179EB1CC1199B9C51B92B5C8159D"}},
// a bunch of different test values
{"00010203050607080A0B0C0D0F101112",
"506812A45F08C889B97F5980038B8359",
"D8F532538289EF7D06B506A4FD5BE9C9", {0},{0}},
{"00010203050607080A0B0C0D0F10111214151617191A1B1C",
"2D33EEF2C0430A8A9EBF45E809C40BB6",
"DFF4945E0336DF4C1C56BC700EFF837F", {0},{0}},
{"50515253555657585A5B5C5D5F60616264656667696A6B6C6E6F707173747576",
"050407067477767956575051221D1C1F",
"7444527095838FE080FC2BCDD30847EB", {0},{0}},
{"000000000000000000000000000000000200000000000000",
"00000000000000000000000000000000",
"5D989E122B78C758921EDBEEB827F0C0",{0},{0}},
};
/////////////////////////////////////////////////////////////////////////////////
int g_TotalFailures = 0;
// declaration, forward
template <class T>
void runTest( int argc, char** argv, int len);
template<class T>
void
computeGold( T* reference, T* idata, const unsigned int len)
{
const T T_len = static_cast<T>( len);
for( unsigned int i = 0; i < len; ++i)
{
reference[i] = idata[i] * T_len;
}
}
////////////////////////////////////////////////////////////////////////////////
// aes main methods
////////////////////////////////////////////////////////////////////////////////
void TextToHex(const char * in, char * data)
{
// given a text string, convert to hex data
int val;
while (*in)
{
val = *in++;
if (val > '9')
val = toupper(val) - 'A' + 10;
else
val = val - '0';
*data = val*16;
val = *in++;
if (val > '9')
val = toupper(val) - 'A' + 10;
else
val = val - '0';
*data++ += val;
}
} // TextToHex
// test a given test vector, see that internals are working
// return false iff fails
bool TestVector(const test_t & vector, bool use_states)
{
bool retval = true; // assume passes
// data sizes in bytes
int keylen = strlen(vector.key)/2, blocklen = strlen(vector.plaintext)/2;
CRYPT crypt;
crypt.SetParameters(keylen*8,blocklen*8);
unsigned char key[32], plaintext[32],ciphertext[32],temptext[32];
unsigned char states[4096*20];
TextToHex(vector.key,reinterpret_cast<char*>(key));
TextToHex(vector.ciphertext,reinterpret_cast<char*>(ciphertext));
TextToHex(vector.plaintext,reinterpret_cast<char*>(plaintext));
if (use_states == true)
for (int pos = 0; pos < 9; pos++)
TextToHex(vector.e_vectors[pos],reinterpret_cast<char*>(states)+pos*16);
crypt.StartEncryption(key);
#ifdef _SLOW
if (use_states == true)
crypt.EncryptBlock(plaintext,temptext,states);
else
#endif
crypt.EncryptBlock(plaintext,temptext);
// check that temp = cipher
if (memcmp(ciphertext,temptext,blocklen) != 0)
{
cout << "Error: encryption error\n";
retval = false;
}
else
cout << "Encryption passed\n";
crypt.StartDecryption(key);
#ifdef _SLOW
if (use_states == true)
crypt.DecryptBlock(ciphertext,temptext,states);
else
#endif
crypt.DecryptBlock(ciphertext,temptext);
if (memcmp(plaintext,temptext,blocklen) != 0)
{
cout << "Error: decryption error\n";
retval = false;
}
else
cout << "Decryption passed\n";
return retval;
} // TestVector
// return false iff 2 byte end values not preserved
bool CheckBuffer(const unsigned char * buf, int length)
{
return (0xBE == buf[0]) && (0xEF == buf[1]) &&
(0xBE == buf[length+2]) && (0xEF == buf[length+3]);
} // CheckBuffer
// return false iff fails
bool RandomTest(int pos)
{
// data sizes in bytes
int keylen, blocklen,datalen,mode;
keylen = (rand()%3)*8 + 16;
//blocklen = (rand()%3)*8 + 16;
blocklen = 16;
//mode = rand()%2; // various chaining modes
mode = 0; // ECB only!!
assert((16 == keylen) || (24 == keylen) || (32 == keylen));
assert((16 == blocklen) || (24 == blocklen) || (32 == blocklen));
#define MAXDATA 4096 // max length of random data
CRYPT crypt;
crypt.SetParameters(keylen*8,blocklen*8);
datalen = rand()%MAXDATA;
unsigned char key[32], plaintext[MAXDATA+40],ciphertext[MAXDATA+40],temptext[MAXDATA+40];
cout << "Test: " << pos+1 << " (keysize,blocksize,datalength): (" << keylen << ',' << blocklen << "," << datalen << ")\n";
for (pos = 0; pos < keylen; pos++)
key[pos] = rand();
// add buffer bytes to each end to catch errors
plaintext[0] = 0xBE; plaintext[1] = 0xEF;
ciphertext[0] = 0xBE; ciphertext[1] = 0xEF;
temptext[0] = 0xBE; temptext[1] = 0xEF;
for (pos = 0; pos < datalen; pos++)
plaintext[pos+2] = rand();
// pad
int padlen = blocklen - (datalen%blocklen);
for (pos = 0; pos < padlen; pos++)
plaintext[pos+2+datalen] = 0;
// add buffer bytes to each end to catch errors
pos = padlen+2+datalen;
plaintext[pos] = 0xBE; plaintext[pos+1] = 0xEF;
ciphertext[pos] = 0xBE; ciphertext[pos+1] = 0xEF;
temptext[pos] = 0xBE; temptext[pos+1] = 0xEF;
#undef MAXDATA
int blocks = (datalen + blocklen-1)/blocklen;
crypt.StartEncryption(key);
crypt.Encrypt(plaintext+2,ciphertext+2,blocks,static_cast<CRYPT::BlockMode>(mode));
crypt.StartDecryption(key);
crypt.Decrypt(ciphertext+2,temptext+2,blocks,static_cast<CRYPT::BlockMode>(mode));
if (memcmp(plaintext+2,temptext+2,datalen) != 0)
{
cout << "Error: decryption error\n";
return false;
}
else if ((false == CheckBuffer(plaintext,datalen+padlen)) ||
(false == CheckBuffer(temptext,datalen+padlen)) ||
(false == CheckBuffer(ciphertext,datalen+padlen)))
{
cout << "Error: buffer overflow\n";
return false;
}
else
cout << "Decryption passed\n";
return true;
} // RandomTest
static __declspec(naked) __int64 GetCounter()
{ // read time stamp counter in Pentium class CPUs
_asm
{
_emit 0fh
_emit 31h
ret;
}
} //
void Timing(int rounds, int keylen, int blocklen)
{
__int64 start1, end1, start2, end2, overhead;
unsigned char key[32],plaintext[32],ciphertext[32];
int pos;
CRYPT crypt;
crypt.SetParameters(keylen*8,blocklen*8);
srand(0); // make repeatable
for (pos = 0; pos < keylen; pos++)
key[pos] = rand();
for (pos = 0; pos < blocklen; pos++)
plaintext[pos] = rand();
// find overhead for these
start1 = GetCounter();
end1 = GetCounter();
overhead = end1 - start1;
crypt.StartEncryption(key);
unsigned long min_e = 1000000;
double total_e = 0;
for (pos = 0; pos < rounds; pos++)
{
#ifdef _GPU
//printf(".");
#endif
start1 = GetCounter();
crypt.EncryptBlock(plaintext,ciphertext);
end1 = GetCounter();
total_e += end1-start1-overhead;
if (min_e > (end1-start1-overhead))
min_e = static_cast<unsigned long>(end1-start1-overhead);
}
cout << "Min cycles per encryption (key,block): (" << keylen*8 << ',' << blocklen*8 << ") ";
cout << min_e << endl;
cout << "Avg cycles per encryption (key,block): (" << keylen*8 << ',' << blocklen*8 << ") ";
cout << total_e/rounds << endl;
crypt.StartDecryption(key);
unsigned long min_d = 1000000;
double total_d = 0;
for (pos = 0; pos < rounds; pos++)
{
#ifdef _GPU
//printf(".");
#endif
start2 = GetCounter();
crypt.DecryptBlock(plaintext,ciphertext);
end2 = GetCounter();
total_d += end2-start2-overhead;
if (min_d > (end2-start2-overhead))
min_d = static_cast<unsigned long>(end2-start2-overhead);
}
cout << "Min cycles per decryption (key,block): (" << keylen*8 << ',' << blocklen*8 << ") ";
cout << min_d << endl;
cout << "Avg cycles per decryption (key,block): (" << keylen*8 << ',' << blocklen*8 << ") ";
cout << total_d/rounds << endl;
} // Timing
// test a file encryption
void AESEncryptFile(string path)
{
#define CPU_CRYPT Rijndael
#define GPU_CRYPT Rijndael_GPU
string ext = path.substr(path.find_last_of(".") + 1);
string fname;
string dir = "";
size_t pos = path.find_last_of("\\");
if(pos != std::string::npos){
fname.assign(path.begin() + pos + 1, path.end() - ext.length() - 1);
dir.assign(path.begin(),path.begin() + pos + 1);
} else {
fname.assign(path.begin(), path.end() - ext.length() - 1);
}
string outFileCPU = dir + fname + "_cpu_out." + ext;
string outFileGPU = dir + fname + "_gpu_out." + ext;
ifstream ifile(path,ios_base::binary);
ofstream ofileCPU(outFileCPU,ios_base::binary);
ofstream ofileGPU(outFileGPU,ios_base::binary);
// get file size
ifile.seekg(0,ios_base::end);
int size,fsize = ifile.tellg();
ifile.seekg(0,ios_base::beg);
// round up (ignore pad for here)
size = (fsize+15)&(~15);
char * ibuffer = new char[size];
char * obuffer = new char[size];
ifile.read(ibuffer,fsize);
GPU_CRYPT gpu_crypt;
CPU_CRYPT cpu_crypt;
gpu_crypt.SetParameters(192);
cpu_crypt.SetParameters(192);
// random key good enough
unsigned char key[192/8];
for (int pos = 0; pos < sizeof(key); ++pos)
key[pos] = rand();
gpu_crypt.StartEncryption(key);
cpu_crypt.StartEncryption(key);
gpu_crypt.Encrypt(reinterpret_cast<const unsigned char*>(ibuffer),reinterpret_cast<unsigned char*>(obuffer),size/16,static_cast<GPU_CRYPT::BlockMode>(0));
UINT64 cpuTime,gpuTime;
INT64 start, finish;
printf("CPU decryption:\n");
start = GetTimeMs64();
cpu_crypt.Decrypt(reinterpret_cast<const unsigned char*>(obuffer),reinterpret_cast<unsigned char*>(ibuffer),size/16,static_cast<CPU_CRYPT::BlockMode>(0));
finish = GetTimeMs64();
ofileCPU.write(ibuffer,size);
cpuTime = finish - start;
printf("CPU processing time: %d (ms)\n",cpuTime);
printf("GPU decryption:\n");
start = GetTimeMs64();
gpu_crypt.Decrypt(reinterpret_cast<const unsigned char*>(obuffer),reinterpret_cast<unsigned char*>(ibuffer),size/16,static_cast<CRYPT::BlockMode>(0));
finish = GetTimeMs64();
ofileGPU.write(ibuffer,size);
gpuTime = finish - start;
//printf("GPU processing time: %d (ms)\n",gpuTime);
printf("Total Speedup: %f\n", (double)cpuTime/gpuTime);
delete [] ibuffer;
delete [] obuffer;
ofileCPU.close();
ofileGPU.close();
ifile.close();
} // AESEncryptFile
////////////////////////////////////////////////////////////////////////////////
// AES main
////////////////////////////////////////////////////////////////////////////////
int aes_main(int argc,char** argv)
{
string filename = "";
if(argc>2){
printf("usage: %s [<input-file>]\n",argv[0]);
exit(0);
}
if(argc==2)
filename.assign(argv[1]);
#ifdef _WIN32
// to try to prevent windows from interfering too much
SetPriorityClass(GetCurrentProcess(),HIGH_PRIORITY_CLASS);
SetThreadPriority(GetCurrentThread(),THREAD_PRIORITY_TIME_CRITICAL);
#endif
bool allPassed = true; // asusme this
// some test vectors to check integrity
allPassed &= TestVector(vectors[0],true);
allPassed &= TestVector(vectors[1],false);
allPassed &= TestVector(vectors[2],false);
allPassed &= TestVector(vectors[3],false);
allPassed &= TestVector(vectors[4],false);
#ifdef __TimingTests
// check a bunch of timings for different key and block sizes
//for (int block = 16; block <= 32; block += 8)
for (int key = 16; key <= 32; key += 8)
Timing(100000,key,16);
#endif
// this is to randomly test data
srand(0); // make reproducible
cout << "Random tests:\n";
for (int pos = 0; pos < RANDOM_TEST_COUNT; pos++)
{
bool passed = RandomTest(pos);
allPassed &= passed;
if (passed == false)
cerr << "Random Test " << pos << " failed\n";
}
if (false == allPassed)
cerr << "ERROR: Some test(s) failed\n";
else
cout << "PASSED: All tests passed\n";
// test a file encryption
ifstream ifile(filename);
if (ifile) {
ifile.close();
cout << "\n\nencrypting file test:\n";
AESEncryptFile(filename);
}
cout << "\n\n";
return 0;
} // aes_main
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
aes_main(argc,argv);
/*
shrQAStart(argc, argv);
printf("> runTest<float,32>\n");
runTest<float>( argc, argv, 32);
printf("> runTest<int,64>\n");
runTest<int>( argc, argv, 64);
printf("\n[simpleTemplates] -> Test Results: %d Failures\n", g_TotalFailures);
cutilDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (g_TotalFailures == 0) ? QA_PASSED : QA_FAILED);
*/
}
// To completely templatize runTest (below) with cutil, we need to use
// template specialization to wrap up CUTIL's array comparison and file writing
// functions for different types.
// Here's the generic wrapper for cutCompare*
template<class T>
class ArrayComparator
{
public:
CUTBoolean compare( const T* reference, T* data, unsigned int len)
{
fprintf(stderr, "Error: no comparison function implemented for this type\n");
return CUTFalse;
}
};
// Here's the specialization for ints:
template<>
class ArrayComparator<int>
{
public:
CUTBoolean compare( const int* reference, int* data, unsigned int len)
{
return cutComparei(reference, data, len);
}
};
// Here's the specialization for floats:
template<>
class ArrayComparator<float>
{
public:
CUTBoolean compare( const float* reference, float* data, unsigned int len)
{
return cutComparef(reference, data, len);
}
};
// Here's the generic wrapper for cutWriteFile*
template<class T>
class ArrayFileWriter
{
public:
CUTBoolean write(const char* filename, T* data, unsigned int len, float epsilon)
{
fprintf(stderr, "Error: no file write function implemented for this type\n");
return CUTFalse;
}
};
// int specialization: cutWriteFilei takes a bool "verbose"-style flag in
// place of epsilon, so any non-zero epsilon maps to true.
template<>
class ArrayFileWriter<int>
{
public:
    CUTBoolean write(const char *filename, int *data, unsigned int len, float epsilon)
    {
        return cutWriteFilei(filename, data, len, epsilon != 0);
    }
};
// float specialization: forwards epsilon straight through to cutWriteFilef.
template<>
class ArrayFileWriter<float>
{
public:
    CUTBoolean write(const char *filename, float *data, unsigned int len, float epsilon)
    {
        return cutWriteFilef(filename, data, len, epsilon);
    }
};
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Allocates a length-`len` array of T on host and device, launches
// testKernel<T> with one block of `len` threads (and `len * sizeof(T)` bytes
// of dynamic shared memory), copies the result back and either compares it
// against the CPU reference (computeGold) or, with --regression, writes it
// to ./data/regression.dat. Failures are accumulated in g_TotalFailures.
template<class T>
void runTest( int argc, char** argv, int len)
{
    int devID;
    cudaDeviceProp deviceProps;
    // Honor an explicit --device selection; otherwise pick the fastest GPU.
    if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
        devID = cutilDeviceInit(argc, argv);
        if (devID < 0) {
            printf("No CUDA Capable devices found, exiting...\n");
            shrQAFinishExit(argc, (const char **)argv, QA_WAIVED);
        }
    }
    else {
        devID = cutGetMaxGflopsDeviceId();
        cudaSetDevice( devID );
    }
    // get number of SMs on this GPU
    cutilSafeCall(cudaGetDeviceProperties(&deviceProps, devID));
    printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
    unsigned int timer = 0;
    cutilCheckError( cutCreateTimer( &timer));
    cutilCheckError( cutStartTimer( timer));
    unsigned int num_threads = len;
    // BUG FIX: the buffers hold elements of T, not float. Using
    // sizeof(float) under-allocates whenever sizeof(T) > sizeof(float)
    // (e.g. T = double); note the device->host copy below already
    // (correctly) uses sizeof(T).
    unsigned int mem_size = sizeof( T) * num_threads;
    // allocate host memory
    T* h_idata = (T*) malloc( mem_size);
    // initialize the memory with the sequence 0, 1, 2, ...
    for( unsigned int i = 0; i < num_threads; ++i)
    {
        h_idata[i] = (T) i;
    }
    // allocate device memory
    T* d_idata;
    cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size));
    // copy host memory to device
    cutilSafeCall( cudaMemcpy( d_idata, h_idata, mem_size,
                                cudaMemcpyHostToDevice) );
    // allocate device memory for result
    T* d_odata;
    cutilSafeCall( cudaMalloc( (void**) &d_odata, mem_size));
    // setup execution parameters: a single block of num_threads threads,
    // with one element of shared memory per thread
    dim3 grid( 1, 1, 1);
    dim3 threads( num_threads, 1, 1);
    printf("launching kernel...\n");
    // execute the kernel
    testKernel<T><<< grid, threads, mem_size >>>( d_idata, d_odata);
    // check if kernel execution generated an error
    cutilCheckMsg("Kernel execution failed");
    // allocate mem for the result on host side
    T* h_odata = (T*) malloc( mem_size);
    // copy result from device to host
    cutilSafeCall( cudaMemcpy( h_odata, d_odata, sizeof(T) * num_threads,
                                cudaMemcpyDeviceToHost) );
    cutilCheckError( cutStopTimer( timer));
    printf( "Processing time: %f (ms)\n", cutGetTimerValue( timer));
    cutilCheckError( cutDeleteTimer( timer));
    // compute reference solution on the CPU
    T* reference = (T*) malloc( mem_size);
    computeGold<T>( reference, h_idata, num_threads);
    ArrayComparator<T> comparator;
    ArrayFileWriter<T> writer;
    // check result
    if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
    {
        // write file for regression test
        cutilCheckError( writer.write( "./data/regression.dat",
                                       h_odata, num_threads, 0.0));
    }
    else
    {
        // custom output handling when no regression test running
        // in this case check if the result is equivalent to the expected solution
        CUTBoolean res = comparator.compare( reference, h_odata, num_threads);
        printf( "Compare %s\n\n", (1 == res) ? "OK" : "MISMATCH");
        g_TotalFailures += (1 != res);
    }
    // cleanup memory
    free( h_idata);
    free( h_odata);
    free( reference);
    cutilSafeCall(cudaFree(d_idata));
    cutilSafeCall(cudaFree(d_odata));
    cutilDeviceReset();
}
|
883806bf2802dde69952ee7fcebb369e37788903.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* stringsort_app.cu
*
* @brief CUDPP application-level merge sorting routines
*/
/** @addtogroup cudpp_app
* @{
*/
/** @name StringSort Functions
* @{
*/
#include "cuda_util.h"
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_stringsort.h"
#include "kernel/stringsort_kernel.cuh"
#include "kernel/mergesort_kernel.cuh" //for simpleCopy
#include "limits.h"
#define BLOCKSORT_SIZE 1024
#define DEPTH 8
/** @brief Performs merge sort utilizing three stages.
 *  (1) Blocksort, (2) simple merge and (3) multi merge on a
 *  set of strings
 *
 * @param[in,out] pkeys Keys (first four characters of string) to be sorted.
 * @param[in,out] pvals Addresses of string locations for tie-breaks
 * @param[out] stringVals global string value array (four characters stuffed into a uint)
 * @param[in] numElements Number of elements in the sort.
 * @param[in] stringArrayLength The size of our string array in uints (4 chars per uint)
 * @param[in] plan Configuration information for mergesort.
 **/
void runStringSort(unsigned int *pkeys,
                   unsigned int *pvals,
                   unsigned int *stringVals,
                   size_t numElements,
                   size_t stringArrayLength,
                   const CUDPPStringSortPlan *plan)
{
    //printf("start\n");
    int numPartitions = (numElements+BLOCKSORT_SIZE-1)/BLOCKSORT_SIZE;
    int numBlocks = numPartitions/2;
    int partitionSize = BLOCKSORT_SIZE;
    int subPartitions = 4;
    // Scratch key/value buffers that ping-pong with pkeys/pvals between passes.
    unsigned int* temp_keys;
    unsigned int* temp_vals;
    CUDA_SAFE_CALL( hipMalloc((void **) &temp_keys, sizeof(unsigned int)*numElements));
    CUDA_SAFE_CALL( hipMalloc((void **) &temp_vals, sizeof(unsigned int)*numElements));
    unsigned int *partitionSizeA, *partitionBeginA, *partitionSizeB, *partitionBeginB;
    unsigned int swapPoint = 32;
    int blockLimit = swapPoint*subPartitions;
    // FIX: these four allocations were previously unchecked; wrap them in
    // CUDA_SAFE_CALL for consistency with every other allocation here.
    CUDA_SAFE_CALL(hipMalloc((void**)&partitionBeginA, blockLimit*sizeof(unsigned int)));
    CUDA_SAFE_CALL(hipMalloc((void**)&partitionSizeA, blockLimit*sizeof(unsigned int)));
    CUDA_SAFE_CALL(hipMalloc((void**)&partitionBeginB, blockLimit*sizeof(unsigned int)));
    CUDA_SAFE_CALL(hipMalloc((void**)&partitionSizeB, blockLimit*sizeof(unsigned int)));
    int numThreads = 128;
    // Stage (1): sort each BLOCKSORT_SIZE-sized partition in place.
    hipLaunchKernelGGL(( blockWiseStringSort<unsigned int, DEPTH>)
        , dim3(numPartitions), dim3(BLOCKSORT_SIZE/DEPTH), 2*(BLOCKSORT_SIZE)*sizeof(unsigned int), 0, pkeys, pvals, stringVals, BLOCKSORT_SIZE, numElements, stringArrayLength);
    int mult = 1; int count = 0;
    CUDA_SAFE_CALL(hipDeviceSynchronize());
    // Stage (2): we run p stages of simpleMerge until numBlocks <= some critical level.
    // `count` tracks which of pkeys/temp_keys currently holds the live data.
    while(numPartitions > 32 || (partitionSize*mult < 16384 && numPartitions > 1))
    {
        //printf("Running simple merge for %d partitions of size %d\n", numPartitions, partitionSize*mult);
        numBlocks = (numPartitions&0xFFFE);
        if(count%2 == 0)
        {
            hipLaunchKernelGGL(( simpleStringMerge<unsigned int, 2>)
                , dim3(numBlocks), dim3(CTASIZE_simple), sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4), 0, pkeys, temp_keys,
                pvals, temp_vals, stringVals, partitionSize*mult, numElements, count, stringArrayLength);
            // Odd partition count: the last partition has no merge partner,
            // so copy it through unchanged.
            if(numPartitions%2 == 1)
            {
                int offset = (partitionSize*mult*(numPartitions-1));
                int numElementsToCopy = numElements-offset;
                hipLaunchKernelGGL(( simpleCopy<unsigned int>)
                    , dim3((numElementsToCopy+numThreads-1)/numThreads), dim3(numThreads), 0, 0, pkeys, pvals, temp_keys, temp_vals, offset, numElementsToCopy);
            }
        }
        else
        {
            hipLaunchKernelGGL(( simpleStringMerge<unsigned int, 2>)
                , dim3(numBlocks), dim3(CTASIZE_simple), sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4), 0, temp_keys, pkeys,
                temp_vals, pvals, stringVals, partitionSize*mult, numElements, count, stringArrayLength);
            if(numPartitions%2 == 1)
            {
                int offset = (partitionSize*mult*(numPartitions-1));
                int numElementsToCopy = numElements-offset;
                hipLaunchKernelGGL(( simpleCopy<unsigned int>)
                    , dim3((numElementsToCopy+numThreads-1)/numThreads), dim3(numThreads), 0, 0, temp_keys, temp_vals, pkeys, pvals, offset, numElementsToCopy);
            }
        }
        mult*=2;
        count++;
        numPartitions = (numPartitions+1)/2;
    }
    // Stage (3): end of simpleMerge, now blocks cooperate to merge partitions.
    while (numPartitions > 1)
    {
        //printf("Running multi merge for %d partitions of size %d\n", numPartitions, partitionSize*mult);
        numBlocks = (numPartitions&0xFFFE);
        int secondBlocks = ((numBlocks)*subPartitions+numThreads-1)/numThreads;
        if(count%2 == 1)
        {
            hipLaunchKernelGGL(( findMultiPartitions<unsigned int>)
                , dim3(secondBlocks), dim3(numThreads), 0, 0, temp_keys, temp_vals, stringVals, subPartitions, numBlocks, partitionSize*mult, partitionBeginA, partitionSizeA,
                partitionBeginB, partitionSizeB, numElements, stringArrayLength);
            //int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements);
            CUDA_SAFE_CALL(hipDeviceSynchronize());
            hipLaunchKernelGGL(( stringMergeMulti<unsigned int, DEPTH_multi>)
                , dim3(numBlocks*subPartitions), dim3(CTASIZE_multi), (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int), 0, temp_keys, pkeys, temp_vals,
                pvals, stringVals, subPartitions, numBlocks, partitionBeginA, partitionSizeA, partitionBeginB, partitionSizeB, mult*partitionSize, count, numElements, stringArrayLength);
            CUDA_SAFE_CALL(hipDeviceSynchronize());
            if(numPartitions%2 == 1)
            {
                int offset = (partitionSize*mult*(numPartitions-1));
                int numElementsToCopy = numElements-offset;
                hipLaunchKernelGGL(( simpleCopy<unsigned int>)
                    , dim3((numElementsToCopy+numThreads-1)/numThreads), dim3(numThreads), 0, 0, temp_keys, temp_vals, pkeys, pvals, offset, numElementsToCopy);
            }
        }
        else
        {
            hipLaunchKernelGGL(( findMultiPartitions<unsigned int>)
                , dim3(secondBlocks), dim3(numThreads), 0, 0, pkeys, pvals, stringVals, subPartitions, numBlocks, partitionSize*mult, partitionBeginA, partitionSizeA,
                partitionBeginB, partitionSizeB, numElements, stringArrayLength);
            CUDA_SAFE_CALL(hipDeviceSynchronize());
            //int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements);
            hipLaunchKernelGGL(( stringMergeMulti<unsigned int, DEPTH_multi>)
                , dim3(numBlocks*subPartitions), dim3(CTASIZE_multi), (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int), 0, pkeys, temp_keys, pvals,
                temp_vals, stringVals, subPartitions, numBlocks, partitionBeginA, partitionSizeA, partitionBeginB, partitionSizeB, mult*partitionSize, count, numElements, stringArrayLength);
            CUDA_SAFE_CALL(hipDeviceSynchronize());
            if(numPartitions%2 == 1)
            {
                int offset = (partitionSize*mult*(numPartitions-1));
                int numElementsToCopy = numElements-offset;
                hipLaunchKernelGGL(( simpleCopy<unsigned int>)
                    , dim3((numElementsToCopy+numThreads-1)/numThreads), dim3(numThreads), 0, 0, pkeys, pvals, temp_keys, temp_vals, offset, numElementsToCopy);
            }
        }
        count++;
        mult*=2;
        subPartitions*=2;
        numPartitions = (numPartitions+1)/2;
    }
    // After an odd number of passes the result lives in the temp buffers;
    // move it back into the caller's arrays.
    if(count%2==1)
    {
        CUDA_SAFE_CALL(hipMemcpy(pkeys, temp_keys, numElements*sizeof(unsigned int), hipMemcpyDeviceToDevice));
        CUDA_SAFE_CALL(hipMemcpy(pvals, temp_vals, numElements*sizeof(unsigned int), hipMemcpyDeviceToDevice));
    }
    CUDA_SAFE_CALL(hipFree(partitionBeginA));
    CUDA_SAFE_CALL(hipFree(partitionBeginB));
    CUDA_SAFE_CALL(hipFree(partitionSizeA));
    CUDA_SAFE_CALL(hipFree(partitionSizeB));
    CUDA_SAFE_CALL(hipFree(temp_keys));
    CUDA_SAFE_CALL(hipFree(temp_vals));
    //printf("end\n");
}
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * @brief From the programmer-specified sort configuration,
 * creates internal memory for performing the sort.
 *
 * Currently a no-op: runStringSort allocates its scratch buffers on
 * demand rather than storing them in the plan.
 *
 * @param[in] plan Pointer to CUDPPStringSortPlan object
 **/
void allocStringSortStorage(CUDPPStringSortPlan *plan)
{
}
/** @brief Deallocates intermediate memory from allocStringSortStorage.
 *
 * Currently a no-op, matching allocStringSortStorage: the plan owns no
 * intermediate storage to release.
 *
 * @param[in] plan Pointer to CUDPPStringSortPlan object
 **/
void freeStringSortStorage(CUDPPStringSortPlan* plan)
{
}
/** @brief Dispatch function to perform a sort on an array with
 * a specified configuration.
 *
 * Thin dispatch shim: casts the untyped plan-interface pointers to the
 * unsigned int element type used internally and forwards to runStringSort.
 *
 * @param[in,out] keys Keys (first four chars of string) to be sorted.
 * @param[in,out] values Address of string values in array of null terminated strings
 * @param[in] stringVals Global string array
 * @param[in] numElements Number of elements in the sort.
 * @param[in] stringArrayLength The size of our string array in uints (4 chars per uint)
 * @param[in] plan Configuration information for mergeSort.
 **/
void cudppStringSortDispatch(void *keys,
                             void *values,
                             void *stringVals,
                             size_t numElements,
                             size_t stringArrayLength,
                             const CUDPPStringSortPlan *plan)
{
    runStringSort(static_cast<unsigned int *>(keys),
                  static_cast<unsigned int *>(values),
                  static_cast<unsigned int *>(stringVals),
                  numElements, stringArrayLength, plan);
}
#ifdef __cplusplus
}
#endif
/** @} */ // end stringsort functions
/** @} */ // end cudpp_app
| 883806bf2802dde69952ee7fcebb369e37788903.cu | // -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* stringsort_app.cu
*
* @brief CUDPP application-level merge sorting routines
*/
/** @addtogroup cudpp_app
* @{
*/
/** @name StringSort Functions
* @{
*/
#include "cuda_util.h"
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_stringsort.h"
#include "kernel/stringsort_kernel.cuh"
#include "kernel/mergesort_kernel.cuh" //for simpleCopy
#include "limits.h"
#define BLOCKSORT_SIZE 1024
#define DEPTH 8
/** @brief Performs merge sort utilizing three stages.
 *  (1) Blocksort, (2) simple merge and (3) multi merge on a
 *  set of strings
 *
 * @param[in,out] pkeys Keys (first four characters of string) to be sorted.
 * @param[in,out] pvals Addresses of string locations for tie-breaks
 * @param[out] stringVals global string value array (four characters stuffed into a uint)
 * @param[in] numElements Number of elements in the sort.
 * @param[in] stringArrayLength The size of our string array in uints (4 chars per uint)
 * @param[in] plan Configuration information for mergesort.
 **/
void runStringSort(unsigned int *pkeys,
                   unsigned int *pvals,
                   unsigned int *stringVals,
                   size_t numElements,
                   size_t stringArrayLength,
                   const CUDPPStringSortPlan *plan)
{
    //printf("start\n");
    int numPartitions = (numElements+BLOCKSORT_SIZE-1)/BLOCKSORT_SIZE;
    int numBlocks = numPartitions/2;
    int partitionSize = BLOCKSORT_SIZE;
    int subPartitions = 4;
    // Scratch key/value buffers that ping-pong with pkeys/pvals between passes.
    unsigned int* temp_keys;
    unsigned int* temp_vals;
    CUDA_SAFE_CALL( cudaMalloc((void **) &temp_keys, sizeof(unsigned int)*numElements));
    CUDA_SAFE_CALL( cudaMalloc((void **) &temp_vals, sizeof(unsigned int)*numElements));
    unsigned int *partitionSizeA, *partitionBeginA, *partitionSizeB, *partitionBeginB;
    unsigned int swapPoint = 32;
    int blockLimit = swapPoint*subPartitions;
    // FIX: these four allocations were previously unchecked; wrap them in
    // CUDA_SAFE_CALL for consistency with every other allocation here.
    CUDA_SAFE_CALL(cudaMalloc((void**)&partitionBeginA, blockLimit*sizeof(unsigned int)));
    CUDA_SAFE_CALL(cudaMalloc((void**)&partitionSizeA, blockLimit*sizeof(unsigned int)));
    CUDA_SAFE_CALL(cudaMalloc((void**)&partitionBeginB, blockLimit*sizeof(unsigned int)));
    CUDA_SAFE_CALL(cudaMalloc((void**)&partitionSizeB, blockLimit*sizeof(unsigned int)));
    int numThreads = 128;
    // Stage (1): sort each BLOCKSORT_SIZE-sized partition in place.
    blockWiseStringSort<unsigned int, DEPTH>
        <<<numPartitions, BLOCKSORT_SIZE/DEPTH, 2*(BLOCKSORT_SIZE)*sizeof(unsigned int)>>>(pkeys, pvals, stringVals, BLOCKSORT_SIZE, numElements, stringArrayLength);
    int mult = 1; int count = 0;
    // NOTE: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the exact replacement (available since CUDA 4.0).
    CUDA_SAFE_CALL(cudaDeviceSynchronize());
    // Stage (2): we run p stages of simpleMerge until numBlocks <= some critical level.
    // `count` tracks which of pkeys/temp_keys currently holds the live data.
    while(numPartitions > 32 || (partitionSize*mult < 16384 && numPartitions > 1))
    {
        //printf("Running simple merge for %d partitions of size %d\n", numPartitions, partitionSize*mult);
        numBlocks = (numPartitions&0xFFFE);
        if(count%2 == 0)
        {
            simpleStringMerge<unsigned int, 2>
                <<<numBlocks, CTASIZE_simple, sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4)>>>(pkeys, temp_keys,
                pvals, temp_vals, stringVals, partitionSize*mult, numElements, count, stringArrayLength);
            // Odd partition count: the last partition has no merge partner,
            // so copy it through unchanged.
            if(numPartitions%2 == 1)
            {
                int offset = (partitionSize*mult*(numPartitions-1));
                int numElementsToCopy = numElements-offset;
                simpleCopy<unsigned int>
                    <<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(pkeys, pvals, temp_keys, temp_vals, offset, numElementsToCopy);
            }
        }
        else
        {
            simpleStringMerge<unsigned int, 2>
                <<<numBlocks, CTASIZE_simple, sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4)>>>(temp_keys, pkeys,
                temp_vals, pvals, stringVals, partitionSize*mult, numElements, count, stringArrayLength);
            if(numPartitions%2 == 1)
            {
                int offset = (partitionSize*mult*(numPartitions-1));
                int numElementsToCopy = numElements-offset;
                simpleCopy<unsigned int>
                    <<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(temp_keys, temp_vals, pkeys, pvals, offset, numElementsToCopy);
            }
        }
        mult*=2;
        count++;
        numPartitions = (numPartitions+1)/2;
    }
    // Stage (3): end of simpleMerge, now blocks cooperate to merge partitions.
    while (numPartitions > 1)
    {
        //printf("Running multi merge for %d partitions of size %d\n", numPartitions, partitionSize*mult);
        numBlocks = (numPartitions&0xFFFE);
        int secondBlocks = ((numBlocks)*subPartitions+numThreads-1)/numThreads;
        if(count%2 == 1)
        {
            findMultiPartitions<unsigned int>
                <<<secondBlocks, numThreads>>>(temp_keys, temp_vals, stringVals, subPartitions, numBlocks, partitionSize*mult, partitionBeginA, partitionSizeA,
                partitionBeginB, partitionSizeB, numElements, stringArrayLength);
            //int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements);
            CUDA_SAFE_CALL(cudaDeviceSynchronize());
            stringMergeMulti<unsigned int, DEPTH_multi>
                <<<numBlocks*subPartitions, CTASIZE_multi, (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int)>>>(temp_keys, pkeys, temp_vals,
                pvals, stringVals, subPartitions, numBlocks, partitionBeginA, partitionSizeA, partitionBeginB, partitionSizeB, mult*partitionSize, count, numElements, stringArrayLength);
            CUDA_SAFE_CALL(cudaDeviceSynchronize());
            if(numPartitions%2 == 1)
            {
                int offset = (partitionSize*mult*(numPartitions-1));
                int numElementsToCopy = numElements-offset;
                simpleCopy<unsigned int>
                    <<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(temp_keys, temp_vals, pkeys, pvals, offset, numElementsToCopy);
            }
        }
        else
        {
            findMultiPartitions<unsigned int>
                <<<secondBlocks, numThreads>>>(pkeys, pvals, stringVals, subPartitions, numBlocks, partitionSize*mult, partitionBeginA, partitionSizeA,
                partitionBeginB, partitionSizeB, numElements, stringArrayLength);
            CUDA_SAFE_CALL(cudaDeviceSynchronize());
            //int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements);
            stringMergeMulti<unsigned int, DEPTH_multi>
                <<<numBlocks*subPartitions, CTASIZE_multi, (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int)>>>(pkeys, temp_keys, pvals,
                temp_vals, stringVals, subPartitions, numBlocks, partitionBeginA, partitionSizeA, partitionBeginB, partitionSizeB, mult*partitionSize, count, numElements, stringArrayLength);
            CUDA_SAFE_CALL(cudaDeviceSynchronize());
            if(numPartitions%2 == 1)
            {
                int offset = (partitionSize*mult*(numPartitions-1));
                int numElementsToCopy = numElements-offset;
                simpleCopy<unsigned int>
                    <<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(pkeys, pvals, temp_keys, temp_vals, offset, numElementsToCopy);
            }
        }
        count++;
        mult*=2;
        subPartitions*=2;
        numPartitions = (numPartitions+1)/2;
    }
    // After an odd number of passes the result lives in the temp buffers;
    // move it back into the caller's arrays.
    if(count%2==1)
    {
        CUDA_SAFE_CALL(cudaMemcpy(pkeys, temp_keys, numElements*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
        CUDA_SAFE_CALL(cudaMemcpy(pvals, temp_vals, numElements*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
    }
    CUDA_SAFE_CALL(cudaFree(partitionBeginA));
    CUDA_SAFE_CALL(cudaFree(partitionBeginB));
    CUDA_SAFE_CALL(cudaFree(partitionSizeA));
    CUDA_SAFE_CALL(cudaFree(partitionSizeB));
    CUDA_SAFE_CALL(cudaFree(temp_keys));
    CUDA_SAFE_CALL(cudaFree(temp_vals));
    //printf("end\n");
}
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * @brief From the programmer-specified sort configuration,
 * creates internal memory for performing the sort.
 *
 * Currently a no-op: runStringSort allocates its scratch buffers on
 * demand rather than storing them in the plan.
 *
 * @param[in] plan Pointer to CUDPPStringSortPlan object
 **/
void allocStringSortStorage(CUDPPStringSortPlan *plan)
{
}
/** @brief Deallocates intermediate memory from allocStringSortStorage.
 *
 * Currently a no-op, matching allocStringSortStorage: the plan owns no
 * intermediate storage to release.
 *
 * @param[in] plan Pointer to CUDPPStringSortPlan object
 **/
void freeStringSortStorage(CUDPPStringSortPlan* plan)
{
}
/** @brief Dispatch function to perform a sort on an array with
 * a specified configuration.
 *
 * Thin dispatch shim: casts the untyped plan-interface pointers to the
 * unsigned int element type used internally and forwards to runStringSort.
 *
 * @param[in,out] keys Keys (first four chars of string) to be sorted.
 * @param[in,out] values Address of string values in array of null terminated strings
 * @param[in] stringVals Global string array
 * @param[in] numElements Number of elements in the sort.
 * @param[in] stringArrayLength The size of our string array in uints (4 chars per uint)
 * @param[in] plan Configuration information for mergeSort.
 **/
void cudppStringSortDispatch(void *keys,
                             void *values,
                             void *stringVals,
                             size_t numElements,
                             size_t stringArrayLength,
                             const CUDPPStringSortPlan *plan)
{
    runStringSort(static_cast<unsigned int *>(keys),
                  static_cast<unsigned int *>(values),
                  static_cast<unsigned int *>(stringVals),
                  numElements, stringArrayLength, plan);
}
#ifdef __cplusplus
}
#endif
/** @} */ // end stringsort functions
/** @} */ // end cudpp_app
|
054a6a502e08a4b844f592dc9f30bd27fa406f0b.hip | // !!! This is a file automatically generated by hipify!!!
/* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Three-dimensional Electro-convective Vortices in Cross-flow
* https://journals.aps.org/pre/abstract/10.1103/PhysRevE.101.033103
* Yifei Guan, James Riley, Igor Novosselov
* University of Washington
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#include "seconds.h"
#include "LBM.h"
#include "LBM.cu"
#include "poisson.cu"
#include <hip/hip_runtime.h>
#include <hipfft.h>
// Driver for the 3D electrohydrodynamic LBM solver: copies physical
// parameters to the device, allocates all distribution/macroscopic fields,
// builds the cuFFT Poisson-solver plan and wavenumber tables, runs the
// time-stepping loop (stream/collide + FFT Poisson solve + E-field update),
// and saves Tecplot/DMD/umax output along the way.
int main(int argc, char* argv[])
{
    checkCudaErrors(hipMalloc((void**)&test, sizeof(double)));
    // Pull grid/time constants off the device, push physical parameters on.
    hipMemcpyFromSymbol(&dt_host, dt, sizeof(double), 0, hipMemcpyDeviceToHost);
    hipMemcpyFromSymbol(&Lx_host, Lx, sizeof(double), 0, hipMemcpyDeviceToHost);
    hipMemcpyFromSymbol(&Ly_host, Ly, sizeof(double), 0, hipMemcpyDeviceToHost);
    hipMemcpyFromSymbol(&dy_host, dy, sizeof(double), 0, hipMemcpyDeviceToHost);
    hipMemcpyFromSymbol(&Lz_host, Lz, sizeof(double), 0, hipMemcpyDeviceToHost);
    hipMemcpyFromSymbol(&dz_host, dz, sizeof(double), 0, hipMemcpyDeviceToHost);
    hipMemcpyToSymbol(nu, &nu_host, sizeof(double), 0, hipMemcpyHostToDevice);
    hipMemcpyToSymbol(uw, &uw_host, sizeof(double), 0, hipMemcpyHostToDevice);
    hipMemcpyToSymbol(exf, &exf_host, sizeof(double), 0, hipMemcpyHostToDevice);
    hipMemcpyToSymbol(K, &K_host, sizeof(double), 0, hipMemcpyHostToDevice);
    // Compute parameters
    compute_parameters(T, M, C, Fe);
    printf("Simulating 3D electrohydrodynamics vortices\n");
    printf("By: Yifei Guan, University of Washington\n");
    printf("      domain size: %ux%ux%u\n",NX,NY,NZ);
    printf("               T: %g\n",*T);
    printf("               M: %g\n",*M);
    printf("               C: %g\n",*C);
    printf("              Fe: %g\n",*Fe);
    printf("           uwall: %g\n",uw_host);
    printf("  External force: %g\n",exf_host);
    printf("        timesteps: %u\n",NSTEPS);
    printf("       save every: %u\n",NSAVE);
    printf("    message every: %u\n",NMSG);
    printf("\n");
    double bytesPerMiB = 1024.0*1024.0;
    double bytesPerGiB = 1024.0*1024.0*1024.0;
    checkCudaErrors(hipSetDevice(0));
    int deviceId = 0;
    checkCudaErrors(hipGetDevice(&deviceId));
    hipDeviceProp_t deviceProp;
    checkCudaErrors(hipGetDeviceProperties(&deviceProp, deviceId));
    size_t gpu_free_mem, gpu_total_mem;
    checkCudaErrors(hipMemGetInfo(&gpu_free_mem,&gpu_total_mem));
    printf("CUDA information\n");
    printf("       using device: %d\n", deviceId);
    printf("               name: %s\n",deviceProp.name);
    printf("    multiprocessors: %d\n",deviceProp.multiProcessorCount);
    printf(" compute capability: %d.%d\n",deviceProp.major,deviceProp.minor);
    printf("      global memory: %.1f MiB\n",deviceProp.totalGlobalMem/bytesPerMiB);
    printf("        free memory: %.1f MiB\n",gpu_free_mem/bytesPerMiB);
    printf("\n");
    // storage of f0 at upper and lower plate
    checkCudaErrors(hipMalloc((void**)&f0bc, sizeof(double)*NX*NY*2));
    //double *prop_gpu;
    // microscopic variables (distribution functions for flow f* and charge h*)
    checkCudaErrors(hipMalloc((void**)&f0_gpu, mem_size_0dir));
    checkCudaErrors(hipMalloc((void**)&f1_gpu, mem_size_n0dir));
    checkCudaErrors(hipMalloc((void**)&f2_gpu, mem_size_n0dir));
    checkCudaErrors(hipMalloc((void**)&h0_gpu, mem_size_0dir));
    checkCudaErrors(hipMalloc((void**)&h1_gpu, mem_size_n0dir));
    checkCudaErrors(hipMalloc((void**)&h2_gpu, mem_size_n0dir));
    // macroscopic variables
    checkCudaErrors(hipMalloc((void**)&rho_gpu, mem_size_scalar));
    checkCudaErrors(hipMalloc((void**)&ux_gpu, mem_size_scalar));
    checkCudaErrors(hipMalloc((void**)&uy_gpu, mem_size_scalar));
    checkCudaErrors(hipMalloc((void**)&uz_gpu, mem_size_scalar));
    checkCudaErrors(hipMalloc((void**)&charge_gpu, mem_size_scalar));
    checkCudaErrors(hipMalloc((void**)&phi_gpu, mem_size_scalar));
    checkCudaErrors(hipMalloc((void**)&Ex_gpu, mem_size_scalar));
    checkCudaErrors(hipMalloc((void**)&Ey_gpu, mem_size_scalar));
    checkCudaErrors(hipMalloc((void**)&Ez_gpu, mem_size_scalar));
    checkCudaErrors(hipMalloc((void**)&kx, sizeof(double)*NX));
    checkCudaErrors(hipMalloc((void**)&ky, sizeof(double)*NY));
    checkCudaErrors(hipMalloc((void**)&kz, sizeof(double)*NE));
    // Setup the cuFFT plan
    CHECK_CUFFT(hipfftPlan3d(&plan, NE, NY, NX, HIPFFT_Z2Z));
    checkCudaErrors(hipMalloc((void**)&freq_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NY*NE));
    checkCudaErrors(hipMalloc((void**)&phi_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NY*NE));
    checkCudaErrors(hipMalloc((void**)&charge_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NY*NE));
    // Setup the frequencies kx, ky and kz (standard FFT wavenumber ordering:
    // non-negative frequencies first, then the negative half).
    for (unsigned i = 0; i <= NX / 2; i++)
    {
        kx_host[i] = (double)i * 2.0 * M_PI / Lx_host;
    }
    for (unsigned i = NX / 2 + 1; i < NX; i++)
    {
        kx_host[i] = ((double) i - NX) * 2.0 * M_PI / Lx_host;
    }
    for (unsigned i = 0; i <= NY / 2; i++)
    {
        ky_host[i] = (double)i * 2.0 * M_PI / Ly_host;
    }
    for (unsigned i = NY / 2 + 1; i < NY; i++)
    {
        ky_host[i] = ((double)i - NY) * 2.0 * M_PI / Ly_host;
    }
    for (unsigned i = 0; i <= NE / 2; i++)
    {
        kz_host[i] = (double)i * 2.0 * M_PI / (NE*dz_host);
    }
    for (unsigned i = NE / 2 + 1; i < NE; i++)
    {
        kz_host[i] = ((double)i - NE) * 2.0 * M_PI / (NE*dz_host);
    }
    CHECK(hipMemcpy(kx, kx_host,
        sizeof(double) * NX, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(ky, ky_host,
        sizeof(double) * NY, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(kz, kz_host,
        sizeof(double) * NE, hipMemcpyHostToDevice));
    // create event objects
    hipEvent_t start, stop;
    checkCudaErrors(hipEventCreate(&start));
    checkCudaErrors(hipEventCreate(&stop));
    //printf("Read previous data: Press 1. Start a new simulation: Press 0.\n ");
    //scanf("%d", &flag);
    //printf("%d\n", flag);
    if (flag == 1) {
        read_data(&t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu);
    }
    else {
        // Zero flow at t=0
        // to initialize rho, charge, phi, ux, uy, Ex, Ey fields.
        initialization(rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu);
        t = 0;
    }
    // initialise f1,h1 as equilibrium for rho, ux, uy, charge, ex, ey
    init_equilibrium(f0_gpu,f1_gpu,h0_gpu,h1_gpu, rho_gpu,charge_gpu, ux_gpu,uy_gpu,uz_gpu,Ex_gpu,Ey_gpu,Ez_gpu);
    // open file for writing
    FILE *fout = fopen("data.dat", "wb+");
    save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu,1);
    // =============================================================================================================
    // File for saving umax
    // =============================================================================================================
    FILE *fumax = fopen("umax.dat", "wb+");
    // report computational results to screen
    //report_flow_properties(0, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu,uy_gpu, uz_gpu,Ex_gpu, Ey_gpu,Ez_gpu);
    double begin = seconds();
    checkCudaErrors(hipEventRecord(start,0));
    // main simulation loop; take NSTEPS time steps
    for (int i = 0; i < NSTEPS; i++) {
        // stream and collide from f1 storing to f2
        // optionally compute and save moments
        stream_collide_save(f0_gpu, f1_gpu, f2_gpu, h0_gpu, h1_gpu, h2_gpu, rho_gpu, charge_gpu,
            ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu, t, f0bc);
        // =========================================================================
        // Fast poisson solver
        // =========================================================================
        // Extend the domain
        extension(charge_gpu, charge_gpu_ext);
        // Execute a real-to-complex 2D FFT
        CHECK_CUFFT(hipfftExecZ2Z(plan, charge_gpu_ext, freq_gpu_ext, HIPFFT_FORWARD));
        // Execute the derivatives in frequency domain
        derivative(kx, ky, kz, freq_gpu_ext);
        // Execute a complex-to-complex 2D IFFT
        CHECK_CUFFT(hipfftExecZ2Z(plan, freq_gpu_ext, phi_gpu_ext, HIPFFT_BACKWARD));
        // Extraction of phi from extended domain phi_gpu_ext
        extract(phi_gpu, phi_gpu_ext);
        // Calculate electric field strength
        efield(phi_gpu, Ex_gpu, Ey_gpu, Ez_gpu);
        t = t + dt_host;
        if (i%NSAVE == 1) {
            save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu,0);
        }
        if (i%NDMD == 1){
            printf("Iteration: %u, physical time: %g.\n", i, t);
            // =============================================================================================================
            // save for MATLAB postprocessing
            // =============================================================================================================
            char filename[128];
            sprintf(filename, "%g.dat", t);
            FILE *fout2 = fopen(filename, "wb+");
            save_data_dmd(fout2, t, uz_gpu);
            fclose(fout2);
        }
        //if (i%printCurrent == 1){
        if (i%printCurrent == 1) {
            checkCudaErrors(hipMemcpy(charge_host, charge_gpu, mem_size_scalar, hipMemcpyDeviceToHost));
            checkCudaErrors(hipMemcpy(Ez_host, Ez_gpu, mem_size_scalar, hipMemcpyDeviceToHost));
            double current_host = current(charge_host, Ez_host);
            printf("Current = %g\n", current_host);
            //printf("%g\n", Ez_host[scalar_index(0, 0, 0)]);
            // =============================================================================================================
            // save umax
            // =============================================================================================================
            record_umax(fumax, t, ux_gpu, uy_gpu, uz_gpu);
        }
    }
    // end of simulation
    checkCudaErrors(hipEventRecord(stop,0));
    checkCudaErrors(hipEventSynchronize(stop));
    float milliseconds = 0.0f;
    checkCudaErrors(hipEventElapsedTime(&milliseconds,start,stop));
    double end = seconds();
    double runtime = end-begin;
    double gpu_runtime = 0.001*milliseconds;
    size_t doubles_read = ndir; // per node every time step
    size_t doubles_written = ndir;
    size_t doubles_saved = 3; // per node every NSAVE time steps
    // note NX*NY overflows when NX=NY=65536
    size_t nodes_updated = NSTEPS*size_t(NX*NY*NZ);
    size_t nodes_saved = (NSTEPS/NSAVE)*size_t(NX*NY*NZ);
    double speed = nodes_updated/(1e6*runtime);
    double bandwidth = (nodes_updated*(doubles_read + doubles_written)+nodes_saved*(doubles_saved))*sizeof(double)/(runtime*bytesPerGiB);
    printf(" ----- performance information -----\n");
    printf("        timesteps: %u\n",NSTEPS);
    printf("    clock runtime: %.3f (s)\n",runtime);
    printf("      gpu runtime: %.3f (s)\n",gpu_runtime);
    printf("            speed: %.2f (Mlups)\n",speed);
    save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu,Ez_gpu,0);
    fclose(fout);
    // =============================================================================================================
    // save umax
    // =============================================================================================================
    record_umax(fumax, t, ux_gpu, uy_gpu, uz_gpu);
    fclose(fumax);
    FILE *fend = fopen("data_end.dat", "wb+");
    save_data_end(fend, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu);
    fclose(fend);
    // destroy event objects
    checkCudaErrors(hipEventDestroy(start));
    checkCudaErrors(hipEventDestroy(stop));
    // free all memory allocated on the GPU and host
    checkCudaErrors(hipFree(f0_gpu));
    checkCudaErrors(hipFree(f1_gpu));
    checkCudaErrors(hipFree(f2_gpu));
    checkCudaErrors(hipFree(h0_gpu));
    checkCudaErrors(hipFree(h1_gpu));
    checkCudaErrors(hipFree(h2_gpu));
    checkCudaErrors(hipFree(rho_gpu));
    // FIX: charge_gpu was allocated above but never freed (memory leak).
    checkCudaErrors(hipFree(charge_gpu));
    checkCudaErrors(hipFree(phi_gpu));
    checkCudaErrors(hipFree(Ex_gpu));
    checkCudaErrors(hipFree(Ey_gpu));
    checkCudaErrors(hipFree(Ez_gpu));
    checkCudaErrors(hipFree(ux_gpu));
    checkCudaErrors(hipFree(uy_gpu));
    checkCudaErrors(hipFree(uz_gpu));
    checkCudaErrors(hipFree(f0bc));
    checkCudaErrors(hipFree(charge_gpu_ext));
    checkCudaErrors(hipFree(phi_gpu_ext));
    checkCudaErrors(hipFree(freq_gpu_ext));
    checkCudaErrors(hipFree(kx));
    checkCudaErrors(hipFree(ky));
    checkCudaErrors(hipFree(kz));
    // FIX: `test` was allocated at the top of main but never freed.
    checkCudaErrors(hipFree(test));
    CHECK_CUFFT(hipfftDestroy(plan));
    //checkCudaErrors(hipFree(prop_gpu));
    free(kx_host);
    free(ky_host);
    free(kz_host);
    // release resources associated with the GPU device
    hipDeviceReset();
    //system("pause");
    return 0;
}
| 054a6a502e08a4b844f592dc9f30bd27fa406f0b.cu | /* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Three-dimensional Electro-convective Vortices in Cross-flow
* https://journals.aps.org/pre/abstract/10.1103/PhysRevE.101.033103
* Yifei Guan, James Riley, Igor Novosselov
* University of Washington
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#include "seconds.h"
#include "LBM.h"
#include "LBM.cu"
#include "poisson.cu"
#include <cuda_runtime.h>
#include <cufft.h>
int main(int argc, char* argv[])
{
checkCudaErrors(cudaMalloc((void**)&test, sizeof(double)));
cudaMemcpyFromSymbol(&dt_host, dt, sizeof(double), 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&Lx_host, Lx, sizeof(double), 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&Ly_host, Ly, sizeof(double), 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&dy_host, dy, sizeof(double), 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&Lz_host, Lz, sizeof(double), 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&dz_host, dz, sizeof(double), 0, cudaMemcpyDeviceToHost);
cudaMemcpyToSymbol(nu, &nu_host, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(uw, &uw_host, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(exf, &exf_host, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(K, &K_host, sizeof(double), 0, cudaMemcpyHostToDevice);
// Compute parameters
compute_parameters(T, M, C, Fe);
printf("Simulating 3D electrohydrodynamics vortices\n");
printf("By: Yifei Guan, University of Washington\n");
printf(" domain size: %ux%ux%u\n",NX,NY,NZ);
printf(" T: %g\n",*T);
printf(" M: %g\n",*M);
printf(" C: %g\n",*C);
printf(" Fe: %g\n",*Fe);
printf(" uwall: %g\n",uw_host);
printf(" External force: %g\n",exf_host);
printf(" timesteps: %u\n",NSTEPS);
printf(" save every: %u\n",NSAVE);
printf(" message every: %u\n",NMSG);
printf("\n");
double bytesPerMiB = 1024.0*1024.0;
double bytesPerGiB = 1024.0*1024.0*1024.0;
checkCudaErrors(cudaSetDevice(0));
int deviceId = 0;
checkCudaErrors(cudaGetDevice(&deviceId));
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, deviceId));
size_t gpu_free_mem, gpu_total_mem;
checkCudaErrors(cudaMemGetInfo(&gpu_free_mem,&gpu_total_mem));
printf("CUDA information\n");
printf(" using device: %d\n", deviceId);
printf(" name: %s\n",deviceProp.name);
printf(" multiprocessors: %d\n",deviceProp.multiProcessorCount);
printf(" compute capability: %d.%d\n",deviceProp.major,deviceProp.minor);
printf(" global memory: %.1f MiB\n",deviceProp.totalGlobalMem/bytesPerMiB);
printf(" free memory: %.1f MiB\n",gpu_free_mem/bytesPerMiB);
printf("\n");
// storage of f0 at upper and lower plate
checkCudaErrors(cudaMalloc((void**)&f0bc, sizeof(double)*NX*NY*2));
//double *prop_gpu;
// microscopic variables
checkCudaErrors(cudaMalloc((void**)&f0_gpu, mem_size_0dir));
checkCudaErrors(cudaMalloc((void**)&f1_gpu, mem_size_n0dir));
checkCudaErrors(cudaMalloc((void**)&f2_gpu, mem_size_n0dir));
checkCudaErrors(cudaMalloc((void**)&h0_gpu, mem_size_0dir));
checkCudaErrors(cudaMalloc((void**)&h1_gpu, mem_size_n0dir));
checkCudaErrors(cudaMalloc((void**)&h2_gpu, mem_size_n0dir));
// macroscopic variables
checkCudaErrors(cudaMalloc((void**)&rho_gpu, mem_size_scalar));
checkCudaErrors(cudaMalloc((void**)&ux_gpu, mem_size_scalar));
checkCudaErrors(cudaMalloc((void**)&uy_gpu, mem_size_scalar));
checkCudaErrors(cudaMalloc((void**)&uz_gpu, mem_size_scalar));
checkCudaErrors(cudaMalloc((void**)&charge_gpu, mem_size_scalar));
checkCudaErrors(cudaMalloc((void**)&phi_gpu, mem_size_scalar));
checkCudaErrors(cudaMalloc((void**)&Ex_gpu, mem_size_scalar));
checkCudaErrors(cudaMalloc((void**)&Ey_gpu, mem_size_scalar));
checkCudaErrors(cudaMalloc((void**)&Ez_gpu, mem_size_scalar));
checkCudaErrors(cudaMalloc((void**)&kx, sizeof(double)*NX));
checkCudaErrors(cudaMalloc((void**)&ky, sizeof(double)*NY));
checkCudaErrors(cudaMalloc((void**)&kz, sizeof(double)*NE));
// Setup the cuFFT plan
CHECK_CUFFT(cufftPlan3d(&plan, NE, NY, NX, CUFFT_Z2Z));
checkCudaErrors(cudaMalloc((void**)&freq_gpu_ext, sizeof(cufftDoubleComplex)*NX*NY*NE));
checkCudaErrors(cudaMalloc((void**)&phi_gpu_ext, sizeof(cufftDoubleComplex)*NX*NY*NE));
checkCudaErrors(cudaMalloc((void**)&charge_gpu_ext, sizeof(cufftDoubleComplex)*NX*NY*NE));
// Setup the frequencies kx and ky
for (unsigned i = 0; i <= NX / 2; i++)
{
kx_host[i] = (double)i * 2.0 * M_PI / Lx_host;
}
for (unsigned i = NX / 2 + 1; i < NX; i++)
{
kx_host[i] = ((double) i - NX) * 2.0 * M_PI / Lx_host;
}
for (unsigned i = 0; i <= NY / 2; i++)
{
ky_host[i] = (double)i * 2.0 * M_PI / Ly_host;
}
for (unsigned i = NY / 2 + 1; i < NY; i++)
{
ky_host[i] = ((double)i - NY) * 2.0 * M_PI / Ly_host;
}
for (unsigned i = 0; i <= NE / 2; i++)
{
kz_host[i] = (double)i * 2.0 * M_PI / (NE*dz_host);
}
for (unsigned i = NE / 2 + 1; i < NE; i++)
{
kz_host[i] = ((double)i - NE) * 2.0 * M_PI / (NE*dz_host);
}
CHECK(cudaMemcpy(kx, kx_host,
sizeof(double) * NX, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(ky, ky_host,
sizeof(double) * NY, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(kz, kz_host,
sizeof(double) * NE, cudaMemcpyHostToDevice));
// create event objects
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//printf("Read previous data: Press 1. Start a new simulation: Press 0.\n ");
//scanf("%d", &flag);
//printf("%d\n", flag);
if (flag == 1) {
read_data(&t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu);
}
else {
// Zero flow at t=0
// to initialize rho, charge, phi, ux, uy, Ex, Ey fields.
initialization(rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu);
t = 0;
}
// initialise f1,h1 as equilibrium for rho, ux, uy, charge, ex, ey
init_equilibrium(f0_gpu,f1_gpu,h0_gpu,h1_gpu, rho_gpu,charge_gpu, ux_gpu,uy_gpu,uz_gpu,Ex_gpu,Ey_gpu,Ez_gpu);
// open file for writing
FILE *fout = fopen("data.dat", "wb+");
save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu,1);
// =============================================================================================================
// File for saving umax
// =============================================================================================================
FILE *fumax = fopen("umax.dat", "wb+");
// report computational results to screen
//report_flow_properties(0, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu,uy_gpu, uz_gpu,Ex_gpu, Ey_gpu,Ez_gpu);
double begin = seconds();
checkCudaErrors(cudaEventRecord(start,0));
// main simulation loop; take NSTEPS time steps
for (int i = 0; i < NSTEPS; i++) {
// stream and collide from f1 storing to f2
// optionally compute and save moments
stream_collide_save(f0_gpu, f1_gpu, f2_gpu, h0_gpu, h1_gpu, h2_gpu, rho_gpu, charge_gpu,
ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu, t, f0bc);
// =========================================================================
// Fast poisson solver
// =========================================================================
// Extend the domain
extension(charge_gpu, charge_gpu_ext);
// Execute a real-to-complex 2D FFT
CHECK_CUFFT(cufftExecZ2Z(plan, charge_gpu_ext, freq_gpu_ext, CUFFT_FORWARD));
// Execute the derivatives in frequency domain
derivative(kx, ky, kz, freq_gpu_ext);
// Execute a complex-to-complex 2D IFFT
CHECK_CUFFT(cufftExecZ2Z(plan, freq_gpu_ext, phi_gpu_ext, CUFFT_INVERSE));
// Extraction of phi from extended domain phi_gpu_ext
extract(phi_gpu, phi_gpu_ext);
// Calculate electric field strength
efield(phi_gpu, Ex_gpu, Ey_gpu, Ez_gpu);
t = t + dt_host;
if (i%NSAVE == 1) {
save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu,0);
}
if (i%NDMD == 1){
printf("Iteration: %u, physical time: %g.\n", i, t);
// =============================================================================================================
// save for MATLAB postprocessing
// =============================================================================================================
char filename[128];
sprintf(filename, "%g.dat", t);
FILE *fout2 = fopen(filename, "wb+");
save_data_dmd(fout2, t, uz_gpu);
fclose(fout2);
}
//if (i%printCurrent == 1){
if (i%printCurrent == 1) {
checkCudaErrors(cudaMemcpy(charge_host, charge_gpu, mem_size_scalar, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Ez_host, Ez_gpu, mem_size_scalar, cudaMemcpyDeviceToHost));
double current_host = current(charge_host, Ez_host);
printf("Current = %g\n", current_host);
//printf("%g\n", Ez_host[scalar_index(0, 0, 0)]);
// =============================================================================================================
// save umax
// =============================================================================================================
record_umax(fumax, t, ux_gpu, uy_gpu, uz_gpu);
}
}
// end of simulation
checkCudaErrors(cudaEventRecord(stop,0));
checkCudaErrors(cudaEventSynchronize(stop));
float milliseconds = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&milliseconds,start,stop));
double end = seconds();
double runtime = end-begin;
double gpu_runtime = 0.001*milliseconds;
size_t doubles_read = ndir; // per node every time step
size_t doubles_written = ndir;
size_t doubles_saved = 3; // per node every NSAVE time steps
// note NX*NY overflows when NX=NY=65536
size_t nodes_updated = NSTEPS*size_t(NX*NY*NZ);
size_t nodes_saved = (NSTEPS/NSAVE)*size_t(NX*NY*NZ);
double speed = nodes_updated/(1e6*runtime);
double bandwidth = (nodes_updated*(doubles_read + doubles_written)+nodes_saved*(doubles_saved))*sizeof(double)/(runtime*bytesPerGiB);
printf(" ----- performance information -----\n");
printf(" timesteps: %u\n",NSTEPS);
printf(" clock runtime: %.3f (s)\n",runtime);
printf(" gpu runtime: %.3f (s)\n",gpu_runtime);
printf(" speed: %.2f (Mlups)\n",speed);
save_data_tecplot(fout, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu,Ez_gpu,0);
fclose(fout);
// =============================================================================================================
// save umax
// =============================================================================================================
record_umax(fumax, t, ux_gpu, uy_gpu, uz_gpu);
fclose(fumax);
FILE *fend = fopen("data_end.dat", "wb+");
save_data_end(fend, t, rho_gpu, charge_gpu, phi_gpu, ux_gpu, uy_gpu, uz_gpu, Ex_gpu, Ey_gpu, Ez_gpu);
fclose(fend);
// destory event objects
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
// free all memory allocatd on the GPU and host
checkCudaErrors(cudaFree(f0_gpu));
checkCudaErrors(cudaFree(f1_gpu));
checkCudaErrors(cudaFree(f2_gpu));
checkCudaErrors(cudaFree(h0_gpu));
checkCudaErrors(cudaFree(h1_gpu));
checkCudaErrors(cudaFree(h2_gpu));
checkCudaErrors(cudaFree(rho_gpu));
checkCudaErrors(cudaFree(phi_gpu));
checkCudaErrors(cudaFree(Ex_gpu));
checkCudaErrors(cudaFree(Ey_gpu));
checkCudaErrors(cudaFree(Ez_gpu));
checkCudaErrors(cudaFree(ux_gpu));
checkCudaErrors(cudaFree(uy_gpu));
checkCudaErrors(cudaFree(uz_gpu));
checkCudaErrors(cudaFree(f0bc));
checkCudaErrors(cudaFree(charge_gpu_ext));
checkCudaErrors(cudaFree(phi_gpu_ext));
checkCudaErrors(cudaFree(freq_gpu_ext));
checkCudaErrors(cudaFree(kx));
checkCudaErrors(cudaFree(ky));
checkCudaErrors(cudaFree(kz));
CHECK_CUFFT(cufftDestroy(plan));
//checkCudaErrors(cudaFree(prop_gpu));
free(kx_host);
free(ky_host);
free(kz_host);
// release resources associated with the GPU device
cudaDeviceReset();
//system("pause");
return 0;
}
|
f16fcf919eca60c6733da2de91e578189c8510a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void curvi (double * __restrict__ r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * __restrict__ strx, double * __restrict__ stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
r1[i*N*N+j*N+k] +=
c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) )
+ mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
) +
c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) )
+ mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k])))
+
c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) )
+ la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
) +
c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) )
+ la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));
r1[i*N*N+j*N+k] += c2*(
(2*mu[i][j][k+2]+la[i][j][k+2])*met2[i][j][k+2]*met1[i][j][k+2]*(
c2*(u1[i+2][j][k+2]-u1[i-2][j][k+2]) +
c1*(u1[i+1][j][k+2]-u1[i-1][j][k+2]) )*strx[i]*stry[j]
+ mu[i][j][k+2]*met3[i][j][k+2]*met1[i][j][k+2]*(
c2*(u2[i+2][j][k+2]-u2[i-2][j][k+2]) +
c1*(u2[i+1][j][k+2]-u2[i-1][j][k+2]) )
+ mu[i][j][k+2]*met4[i][j][k+2]*met1[i][j][k+2]*(
c2*(u3[i+2][j][k+2]-u3[i-2][j][k+2]) +
c1*(u3[i+1][j][k+2]-u3[i-1][j][k+2]) )*stry[j]
+ ((2*mu[i][j][k-2]+la[i][j][k-2])*met2[i][j][k-2]*met1[i][j][k-2]*(
c2*(u1[i+2][j][k-2]-u1[i-2][j][k-2]) +
c1*(u1[i+1][j][k-2]-u1[i-1][j][k-2]) )*strx[i]*stry[j]
+ mu[i][j][k-2]*met3[i][j][k-2]*met1[i][j][k-2]*(
c2*(u2[i+2][j][k-2]-u2[i-2][j][k-2]) +
c1*(u2[i+1][j][k-2]-u2[i-1][j][k-2]) )
+ mu[i][j][k-2]*met4[i][j][k-2]*met1[i][j][k-2]*(
c2*(u3[i+2][j][k-2]-u3[i-2][j][k-2]) +
c1*(u3[i+1][j][k-2]-u3[i-1][j][k-2]) )*stry[j] )
) + c1*(
(2*mu[i][j][k+1]+la[i][j][k+1])*met2[i][j][k+1]*met1[i][j][k+1]*(
c2*(u1[i+2][j][k+1]-u1[i-2][j][k+1]) +
c1*(u1[i+1][j][k+1]-u1[i-1][j][k+1]) )*strx[i+2]*stry[j]
+ mu[i][j][k+1]*met3[i][j][k+1]*met1[i][j][k+1]*(
c2*(u2[i+2][j][k+1]-u2[i-2][j][k+1]) +
c1*(u2[i+1][j][k+1]-u2[i-1][j][k+1]) )
+ mu[i][j][k+1]*met4[i][j][k+1]*met1[i][j][k+1]*(
c2*(u3[i+2][j][k+1]-u3[i-2][j][k+1]) +
c1*(u3[i+1][j][k+1]-u3[i-1][j][k+1]) )*stry[j]
+ ((2*mu[i][j][k-1]+la[i][j][k-1])*met2[i][j][k-1]*met1[i][j][k-1]*(
c2*(u1[i+2][j][k-1]-u1[i-2][j][k-1]) +
c1*(u1[i+1][j][k-1]-u1[i-1][j][k-1]) )*strx[i-2]*stry[j]
+ mu[i][j][k-1]*met3[i][j][k-1]*met1[i][j][k-1]*(
c2*(u2[i+2][j][k-1]-u2[i-2][j][k-1]) +
c1*(u2[i+1][j][k-1]-u2[i-1][j][k-1]) )
+ mu[i][j][k-1]*met4[i][j][k-1]*met1[i][j][k-1]*(
c2*(u3[i+2][j][k-1]-u3[i-2][j][k-1]) +
c1*(u3[i+1][j][k-1]-u3[i-1][j][k-1]) )*stry[j] ) );
r1[i*N*N+j*N+k] += ( c2*(
(2*mu[i+2][j][k]+la[i+2][j][k])*met2[i+2][j][k]*met1[i+2][j][k]*(
c2*(u1[i+2][j][k+2]-u1[i+2][j][k-2]) +
c1*(u1[i+2][j][k+1]-u1[i+2][j][k-1]) )*strx[i]
+ la[i+2][j][k]*met3[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j][k+2]-u2[i+2][j][k-2]) +
c1*(u2[i+2][j][k+1]-u2[i+2][j][k-1]) )*stry[j]
+ la[i+2][j][k]*met4[i+2][j][k]*met1[i+2][j][k]*(
c2*(u3[i+2][j][k+2]-u3[i+2][j][k-2]) +
c1*(u3[i+2][j][k+1]-u3[i+2][j][k-1]) )
+ ((2*mu[i-2][j][k]+la[i-2][j][k])*met2[i-2][j][k]*met1[i-2][j][k]*(
c2*(u1[i-2][j][k+2]-u1[i-2][j][k-2]) +
c1*(u1[i-2][j][k+1]-u1[i-2][j][k-1]) )*strx[i]
+ la[i-2][j][k]*met3[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j][k+2]-u2[i-2][j][k-2]) +
c1*(u2[i-2][j][k+1]-u2[i-2][j][k-1]) )*stry[j]
+ la[i-2][j][k]*met4[i-2][j][k]*met1[i-2][j][k]*(
c2*(u3[i-2][j][k+2]-u3[i-2][j][k-2]) +
c1*(u3[i-2][j][k+1]-u3[i-2][j][k-1]) ) )
) + c1*(
(2*mu[i+1][j][k]+la[i+1][j][k])*met2[i+1][j][k]*met1[i+1][j][k]*(
c2*(u1[i+1][j][k+2]-u1[i+1][j][k-2]) +
c1*(u1[i+1][j][k+1]-u1[i+1][j][k-1]) )*strx[i]
+ la[i+1][j][k]*met3[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j][k+2]-u2[i+1][j][k-2]) +
c1*(u2[i+1][j][k+1]-u2[i+1][j][k-1]) )*stry[j]
+ la[i+1][j][k]*met4[i+1][j][k]*met1[i+1][j][k]*(
c2*(u3[i+1][j][k+2]-u3[i+1][j][k-2]) +
c1*(u3[i+1][j][k+1]-u3[i+1][j][k-1]) )
+ ((2*mu[i-1][j][k]+la[i-1][j][k])*met2[i-1][j][k]*met1[i-1][j][k]*(
c2*(u1[i-1][j][k+2]-u1[i-1][j][k-2]) +
c1*(u1[i-1][j][k+1]-u1[i-1][j][k-1]) )*strx[i]
+ la[i-1][j][k]*met3[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j][k+2]-u2[i-1][j][k-2]) +
c1*(u2[i-1][j][k+1]-u2[i-1][j][k-1]) )*stry[j]
+ la[i-1][j][k]*met4[i-1][j][k]*met1[i-1][j][k]*(
c2*(u3[i-1][j][k+2]-u3[i-1][j][k-2]) +
c1*(u3[i-1][j][k+1]-u3[i-1][j][k-1]) ) ) ) )*stry[j];
r1[i*N*N+j*N+k] += c2*(
mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i]
+ mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) )
+ ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i]
+ mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) )
) + c1*(
mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i]
+ mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) )
+ ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );
r1[i*N*N+j*N+k] += c2*(
mu[i][j][k+2]*met3[i][j][k+2]*met1[i][j][k+2]*(
c2*(u1[i][j+2][k+2]-u1[i][j-2][k+2]) +
c1*(u1[i][j+1][k+2]-u1[i][j-1][k+2]) )*stry[j+2]*strx[i]
+ la[i][j][k+2]*met2[i][j][k+2]*met1[i][j][k+2]*(
c2*(u2[i][j+2][k+2]-u2[i][j-2][k+2]) +
c1*(u2[i][j+1][k+2]-u2[i][j-1][k+2]) )
+ ( mu[i][j][k-2]*met3[i][j][k-2]*met1[i][j][k-2]*(
c2*(u1[i][j+2][k-2]-u1[i][j-2][k-2]) +
c1*(u1[i][j+1][k-2]-u1[i][j-1][k-2]) )*stry[j]*strx[i]
+ la[i][j][k-2]*met2[i][j][k-2]*met1[i][j][k-2]*(
c2*(u2[i][j+2][k-2]-u2[i][j-2][k-2]) +
c1*(u2[i][j+1][k-2]-u2[i][j-1][k-2]) ) )
) + c1*(
mu[i][j][k+1]*met3[i][j][k+1]*met1[i][j][k+1]*(
c2*(u1[i][j+2][k+1]-u1[i][j-2][k+1]) +
c1*(u1[i][j+1][k+1]-u1[i][j-1][k+1]) )*stry[j-2]*strx[i]
+ la[i][j][k+1]*met2[i][j][k+1]*met1[i][j][k+1]*(
c2*(u2[i][j+2][k+1]-u2[i][j-2][k+1]) +
c1*(u2[i][j+1][k+1]-u2[i][j-1][k+1]) )
+ ( mu[i][j][k-1]*met3[i][j][k-1]*met1[i][j][k-1]*(
c2*(u1[i][j+2][k-1]-u1[i][j-2][k-1]) +
c1*(u1[i][j+1][k-1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ la[i][j][k-1]*met2[i][j][k-1]*met1[i][j][k-1]*(
c2*(u2[i][j+2][k-1]-u2[i][j-2][k-1]) +
c1*(u2[i][j+1][k-1]-u2[i][j-1][k-1]) ) ) );
}
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
hipMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
hipMemcpy (r1, h_r1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u1;
hipMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
hipMemcpy (u1, h_u1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u2;
hipMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
hipMemcpy (u2, h_u2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u3;
hipMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
hipMemcpy (u3, h_u3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *mu;
hipMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *la;
hipMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met1;
hipMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
hipMemcpy (met1, h_met1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met2;
hipMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
hipMemcpy (met2, h_met2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met3;
hipMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
hipMemcpy (met3, h_met3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met4;
hipMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
hipMemcpy (met4, h_met4, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *strx;
hipMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice);
double *stry;
hipMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 16);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
hipLaunchKernelGGL(( curvi) , dim3(gridconfig), dim3(blockconfig), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
hipMemcpy (h_r1, r1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
}
| f16fcf919eca60c6733da2de91e578189c8510a2.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void curvi (double * __restrict__ r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * __restrict__ strx, double * __restrict__ stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
r1[i*N*N+j*N+k] +=
c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) )
+ mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
) +
c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) )
+ mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k])))
+
c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) )
+ la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
) +
c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) )
+ la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));
r1[i*N*N+j*N+k] += c2*(
(2*mu[i][j][k+2]+la[i][j][k+2])*met2[i][j][k+2]*met1[i][j][k+2]*(
c2*(u1[i+2][j][k+2]-u1[i-2][j][k+2]) +
c1*(u1[i+1][j][k+2]-u1[i-1][j][k+2]) )*strx[i]*stry[j]
+ mu[i][j][k+2]*met3[i][j][k+2]*met1[i][j][k+2]*(
c2*(u2[i+2][j][k+2]-u2[i-2][j][k+2]) +
c1*(u2[i+1][j][k+2]-u2[i-1][j][k+2]) )
+ mu[i][j][k+2]*met4[i][j][k+2]*met1[i][j][k+2]*(
c2*(u3[i+2][j][k+2]-u3[i-2][j][k+2]) +
c1*(u3[i+1][j][k+2]-u3[i-1][j][k+2]) )*stry[j]
+ ((2*mu[i][j][k-2]+la[i][j][k-2])*met2[i][j][k-2]*met1[i][j][k-2]*(
c2*(u1[i+2][j][k-2]-u1[i-2][j][k-2]) +
c1*(u1[i+1][j][k-2]-u1[i-1][j][k-2]) )*strx[i]*stry[j]
+ mu[i][j][k-2]*met3[i][j][k-2]*met1[i][j][k-2]*(
c2*(u2[i+2][j][k-2]-u2[i-2][j][k-2]) +
c1*(u2[i+1][j][k-2]-u2[i-1][j][k-2]) )
+ mu[i][j][k-2]*met4[i][j][k-2]*met1[i][j][k-2]*(
c2*(u3[i+2][j][k-2]-u3[i-2][j][k-2]) +
c1*(u3[i+1][j][k-2]-u3[i-1][j][k-2]) )*stry[j] )
) + c1*(
(2*mu[i][j][k+1]+la[i][j][k+1])*met2[i][j][k+1]*met1[i][j][k+1]*(
c2*(u1[i+2][j][k+1]-u1[i-2][j][k+1]) +
c1*(u1[i+1][j][k+1]-u1[i-1][j][k+1]) )*strx[i+2]*stry[j]
+ mu[i][j][k+1]*met3[i][j][k+1]*met1[i][j][k+1]*(
c2*(u2[i+2][j][k+1]-u2[i-2][j][k+1]) +
c1*(u2[i+1][j][k+1]-u2[i-1][j][k+1]) )
+ mu[i][j][k+1]*met4[i][j][k+1]*met1[i][j][k+1]*(
c2*(u3[i+2][j][k+1]-u3[i-2][j][k+1]) +
c1*(u3[i+1][j][k+1]-u3[i-1][j][k+1]) )*stry[j]
+ ((2*mu[i][j][k-1]+la[i][j][k-1])*met2[i][j][k-1]*met1[i][j][k-1]*(
c2*(u1[i+2][j][k-1]-u1[i-2][j][k-1]) +
c1*(u1[i+1][j][k-1]-u1[i-1][j][k-1]) )*strx[i-2]*stry[j]
+ mu[i][j][k-1]*met3[i][j][k-1]*met1[i][j][k-1]*(
c2*(u2[i+2][j][k-1]-u2[i-2][j][k-1]) +
c1*(u2[i+1][j][k-1]-u2[i-1][j][k-1]) )
+ mu[i][j][k-1]*met4[i][j][k-1]*met1[i][j][k-1]*(
c2*(u3[i+2][j][k-1]-u3[i-2][j][k-1]) +
c1*(u3[i+1][j][k-1]-u3[i-1][j][k-1]) )*stry[j] ) );
r1[i*N*N+j*N+k] += ( c2*(
(2*mu[i+2][j][k]+la[i+2][j][k])*met2[i+2][j][k]*met1[i+2][j][k]*(
c2*(u1[i+2][j][k+2]-u1[i+2][j][k-2]) +
c1*(u1[i+2][j][k+1]-u1[i+2][j][k-1]) )*strx[i]
+ la[i+2][j][k]*met3[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j][k+2]-u2[i+2][j][k-2]) +
c1*(u2[i+2][j][k+1]-u2[i+2][j][k-1]) )*stry[j]
+ la[i+2][j][k]*met4[i+2][j][k]*met1[i+2][j][k]*(
c2*(u3[i+2][j][k+2]-u3[i+2][j][k-2]) +
c1*(u3[i+2][j][k+1]-u3[i+2][j][k-1]) )
+ ((2*mu[i-2][j][k]+la[i-2][j][k])*met2[i-2][j][k]*met1[i-2][j][k]*(
c2*(u1[i-2][j][k+2]-u1[i-2][j][k-2]) +
c1*(u1[i-2][j][k+1]-u1[i-2][j][k-1]) )*strx[i]
+ la[i-2][j][k]*met3[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j][k+2]-u2[i-2][j][k-2]) +
c1*(u2[i-2][j][k+1]-u2[i-2][j][k-1]) )*stry[j]
+ la[i-2][j][k]*met4[i-2][j][k]*met1[i-2][j][k]*(
c2*(u3[i-2][j][k+2]-u3[i-2][j][k-2]) +
c1*(u3[i-2][j][k+1]-u3[i-2][j][k-1]) ) )
) + c1*(
(2*mu[i+1][j][k]+la[i+1][j][k])*met2[i+1][j][k]*met1[i+1][j][k]*(
c2*(u1[i+1][j][k+2]-u1[i+1][j][k-2]) +
c1*(u1[i+1][j][k+1]-u1[i+1][j][k-1]) )*strx[i]
+ la[i+1][j][k]*met3[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j][k+2]-u2[i+1][j][k-2]) +
c1*(u2[i+1][j][k+1]-u2[i+1][j][k-1]) )*stry[j]
+ la[i+1][j][k]*met4[i+1][j][k]*met1[i+1][j][k]*(
c2*(u3[i+1][j][k+2]-u3[i+1][j][k-2]) +
c1*(u3[i+1][j][k+1]-u3[i+1][j][k-1]) )
+ ((2*mu[i-1][j][k]+la[i-1][j][k])*met2[i-1][j][k]*met1[i-1][j][k]*(
c2*(u1[i-1][j][k+2]-u1[i-1][j][k-2]) +
c1*(u1[i-1][j][k+1]-u1[i-1][j][k-1]) )*strx[i]
+ la[i-1][j][k]*met3[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j][k+2]-u2[i-1][j][k-2]) +
c1*(u2[i-1][j][k+1]-u2[i-1][j][k-1]) )*stry[j]
+ la[i-1][j][k]*met4[i-1][j][k]*met1[i-1][j][k]*(
c2*(u3[i-1][j][k+2]-u3[i-1][j][k-2]) +
c1*(u3[i-1][j][k+1]-u3[i-1][j][k-1]) ) ) ) )*stry[j];
r1[i*N*N+j*N+k] += c2*(
mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i]
+ mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) )
+ ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i]
+ mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) )
) + c1*(
mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i]
+ mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) )
+ ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );
r1[i*N*N+j*N+k] += c2*(
mu[i][j][k+2]*met3[i][j][k+2]*met1[i][j][k+2]*(
c2*(u1[i][j+2][k+2]-u1[i][j-2][k+2]) +
c1*(u1[i][j+1][k+2]-u1[i][j-1][k+2]) )*stry[j+2]*strx[i]
+ la[i][j][k+2]*met2[i][j][k+2]*met1[i][j][k+2]*(
c2*(u2[i][j+2][k+2]-u2[i][j-2][k+2]) +
c1*(u2[i][j+1][k+2]-u2[i][j-1][k+2]) )
+ ( mu[i][j][k-2]*met3[i][j][k-2]*met1[i][j][k-2]*(
c2*(u1[i][j+2][k-2]-u1[i][j-2][k-2]) +
c1*(u1[i][j+1][k-2]-u1[i][j-1][k-2]) )*stry[j]*strx[i]
+ la[i][j][k-2]*met2[i][j][k-2]*met1[i][j][k-2]*(
c2*(u2[i][j+2][k-2]-u2[i][j-2][k-2]) +
c1*(u2[i][j+1][k-2]-u2[i][j-1][k-2]) ) )
) + c1*(
mu[i][j][k+1]*met3[i][j][k+1]*met1[i][j][k+1]*(
c2*(u1[i][j+2][k+1]-u1[i][j-2][k+1]) +
c1*(u1[i][j+1][k+1]-u1[i][j-1][k+1]) )*stry[j-2]*strx[i]
+ la[i][j][k+1]*met2[i][j][k+1]*met1[i][j][k+1]*(
c2*(u2[i][j+2][k+1]-u2[i][j-2][k+1]) +
c1*(u2[i][j+1][k+1]-u2[i][j-1][k+1]) )
+ ( mu[i][j][k-1]*met3[i][j][k-1]*met1[i][j][k-1]*(
c2*(u1[i][j+2][k-1]-u1[i][j-2][k-1]) +
c1*(u1[i][j+1][k-1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ la[i][j][k-1]*met2[i][j][k-1]*met1[i][j][k-1]*(
c2*(u2[i][j+2][k-1]-u2[i][j-2][k-1]) +
c1*(u2[i][j+1][k-1]-u2[i][j-1][k-1]) ) ) );
}
}
}
/**
 * Host wrapper for the curvi stencil kernel.
 *
 * Allocates device buffers for the N^3 fields and the two N-length stretch
 * arrays, copies all inputs host->device, launches `curvi` over an N x N
 * thread grid, and copies the updated r1 field back to the host.
 *
 * Fixes vs. original:
 *  - all 12 device allocations are now freed (the original leaked them all);
 *  - byte counts are computed in size_t to avoid int overflow of N*N*N;
 *  - the kernel launch is error-checked like the allocations.
 */
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
  // Sizes in bytes; cast to size_t first so N*N*N cannot overflow int.
  const size_t vol_bytes = sizeof(double) * (size_t)N * N * N;
  const size_t line_bytes = sizeof(double) * (size_t)N;
  double *r1;
  cudaMalloc (&r1, vol_bytes);
  check_error ("Failed to allocate device memory for r1\n");
  cudaMemcpy (r1, h_r1, vol_bytes, cudaMemcpyHostToDevice);
  double *u1;
  cudaMalloc (&u1, vol_bytes);
  check_error ("Failed to allocate device memory for u1\n");
  cudaMemcpy (u1, h_u1, vol_bytes, cudaMemcpyHostToDevice);
  double *u2;
  cudaMalloc (&u2, vol_bytes);
  check_error ("Failed to allocate device memory for u2\n");
  cudaMemcpy (u2, h_u2, vol_bytes, cudaMemcpyHostToDevice);
  double *u3;
  cudaMalloc (&u3, vol_bytes);
  check_error ("Failed to allocate device memory for u3\n");
  cudaMemcpy (u3, h_u3, vol_bytes, cudaMemcpyHostToDevice);
  double *mu;
  cudaMalloc (&mu, vol_bytes);
  check_error ("Failed to allocate device memory for mu\n");
  cudaMemcpy (mu, h_mu, vol_bytes, cudaMemcpyHostToDevice);
  double *la;
  cudaMalloc (&la, vol_bytes);
  check_error ("Failed to allocate device memory for la\n");
  cudaMemcpy (la, h_la, vol_bytes, cudaMemcpyHostToDevice);
  double *met1;
  cudaMalloc (&met1, vol_bytes);
  check_error ("Failed to allocate device memory for met1\n");
  cudaMemcpy (met1, h_met1, vol_bytes, cudaMemcpyHostToDevice);
  double *met2;
  cudaMalloc (&met2, vol_bytes);
  check_error ("Failed to allocate device memory for met2\n");
  cudaMemcpy (met2, h_met2, vol_bytes, cudaMemcpyHostToDevice);
  double *met3;
  cudaMalloc (&met3, vol_bytes);
  check_error ("Failed to allocate device memory for met3\n");
  cudaMemcpy (met3, h_met3, vol_bytes, cudaMemcpyHostToDevice);
  double *met4;
  cudaMalloc (&met4, vol_bytes);
  check_error ("Failed to allocate device memory for met4\n");
  cudaMemcpy (met4, h_met4, vol_bytes, cudaMemcpyHostToDevice);
  double *strx;
  cudaMalloc (&strx, line_bytes);
  check_error ("Failed to allocate device memory for strx\n");
  cudaMemcpy (strx, h_strx, line_bytes, cudaMemcpyHostToDevice);
  double *stry;
  cudaMalloc (&stry, line_bytes);
  check_error ("Failed to allocate device memory for stry\n");
  cudaMemcpy (stry, h_stry, line_bytes, cudaMemcpyHostToDevice);
  dim3 blockconfig (16, 16);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
  curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
  check_error ("curvi kernel launch failed\n");
  // Blocking copy: also synchronizes with the kernel before h_r1 is read.
  cudaMemcpy (h_r1, r1, vol_bytes, cudaMemcpyDeviceToHost);
  // Release device memory (the original version leaked every buffer).
  cudaFree (r1);
  cudaFree (u1);
  cudaFree (u2);
  cudaFree (u3);
  cudaFree (mu);
  cudaFree (la);
  cudaFree (met1);
  cudaFree (met2);
  cudaFree (met3);
  cudaFree (met4);
  cudaFree (strx);
  cudaFree (stry);
}
|
77400ba42d883656c84cdc14737a931caa3022b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inttypes.h>
#include <cstring>
#include <hipcub/hipcub.hpp>
#include <deque>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <vector>
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/resource_manager.hpp"
namespace HugeCTR {
// Returns the calling thread's lane index (0..warpSize-1) within its warp.
// Fix: '%' must be escaped as '%%' inside an inline-asm template string so
// the literal special-register name %laneid reaches the assembler; a single
// '%l...' is treated as an (invalid) operand reference by the compiler.
__device__ __forceinline__ unsigned int __mylaneid() {
unsigned int laneid;
asm volatile("mov.u32 %0, %%laneid;" : "=r"(laneid));
return laneid;
}
// Identity overload: lets the kernels below call abs(val % num_devices)
// uniformly; it is a no-op when T is unsigned (the modulo is already >= 0).
__device__ __forceinline__ unsigned int abs(unsigned int x) { return x; }
// Interleaves per-column dense/label data (one device pointer per column in
// dense_data_column_ptrs, passed as int64_t) into row-major per-sample
// records, split evenly across num_dense_buffers output buffers.
// Layout: one thread per sample row; each warp stages a 32x32 (row, column)
// tile in shared memory, then writes it back transposed (lane -> column,
// inner loop j -> row).
// NOTE(review): the static shared tile assumes blockDim.x == 256 (8 warps);
// the host wrapper below launches with exactly that block size -- confirm
// before changing the launch configuration.
template <typename T>
__global__ void dense_data_converter_kernel__(int64_t *dense_data_column_ptrs,
const int label_dense_dim, int batch_size,
int num_dense_buffers, int64_t *dense_data_out_ptrs) {
// extern __shared__ char smem_[];
// 8 warps/block
int tile_w = 32; // 32x32 tile
int smem_pitch = 33; // 32 + 1 padding column to avoid shared-memory bank conflicts
__shared__ T smem_staging_ptr[8 * 32 * 33];
int start_idx = threadIdx.x + (blockDim.x * blockIdx.x); // global sample (row) index
// outer loop on label_dense_dim
for (int i = 0; i < label_dense_dim; i += warpSize) {
// stage 32x32 tile - stage 32 columns of data
// warpSize drives the row dim to 32
for (int j = 0; j < tile_w; j++) {
// warp does one column at a time
int col = i + j;
if (col < label_dense_dim) {
int64_t addr = dense_data_column_ptrs[col]; // device pointer smuggled as int64_t
T *data = reinterpret_cast<T *>(addr);
if (start_idx < batch_size) {
smem_staging_ptr[threadIdx.x * smem_pitch + j] = data[start_idx];
}
}
}
__syncthreads();
// write out; broadcast lane 0's row index = first row of this warp's tile
int out_row_idx = __shfl_sync(0xffffffff, start_idx, 0);
// each warp writes out 32 rows and whatever active columns in j(32)
if ((__mylaneid() + i) < label_dense_dim) {
// activate threads
// blockStrided warp over 32 rows write out
int warp_id = threadIdx.x / warpSize;
int smem_row = warp_id * warpSize;
// warpsize doing tile_h
for (int j = 0; j < warpSize; j++) {
if ((j + out_row_idx) < batch_size) {
int curr_out_row = j + out_row_idx;
// samples are block-partitioned: batch_size/num_dense_buffers rows per buffer
int buffer_id = curr_out_row / (batch_size / num_dense_buffers);
int local_id = curr_out_row % (batch_size / num_dense_buffers);
int64_t addr = dense_data_out_ptrs[buffer_id];
T *data = reinterpret_cast<T *>(addr);
data[(local_id * label_dense_dim) + __mylaneid() + i] =
smem_staging_ptr[(smem_row + j) * smem_pitch + __mylaneid()];
}
}
}
__syncthreads();
}
}
// Localized-slot converter: transposes categorical slot columns into
// per-device CSR value buffers. Slot s is owned by device (s % num_devices);
// its value lands in value buffer (dev_id * num_params + param_id).
// dev_slot_offset_ptr[col] is added to each key to make per-slot key ranges
// globally unique. Each written position also marks its row-offset entry
// with 1 (single-hot); per-buffer write counts are accumulated block-locally
// in shared memory and flushed once to dev_csr_row_offset_counter.
// Dynamic shared memory layout: (blockDim.x/warpSize)*32*33 T staging tile,
// followed by num_params*num_devices uint32 block counters.
template <typename T>
__global__ void cat_local_slot_converter_kernel__(int64_t *cat_data_column_ptrs, int num_params,
int param_id, int num_slots, int batch_size,
int num_devices, int32_t *dev_slot_per_device,
T *dev_slot_offset_ptr,
int64_t *dev_csr_value_ptr,
int64_t *dev_csr_row_offset_ptr,
uint32_t *dev_csr_row_offset_counter) {
// 8 warps/block
int tile_w = 32; // 32x32 tile
int smem_pitch = 33; // 32 + 1 padding to avoid shared-memory bank conflicts
extern __shared__ char smem_[];
T *smem_staging_ptr = reinterpret_cast<T *>(smem_);
int num_warps = blockDim.x / warpSize;
uint32_t *block_atomic_accum =
reinterpret_cast<uint32_t *>(smem_ + (num_warps * warpSize * smem_pitch * sizeof(T)));
// zero out smem_accum
for (int i = threadIdx.x; i < num_params * num_devices; i += blockDim.x)
block_atomic_accum[i] = 0;
__syncthreads();
int start_idx = threadIdx.x + (blockDim.x * blockIdx.x); // global sample (row) index
for (int i = 0; i < num_slots; i += warpSize) {
// stage 32x32 tile - stage 32 columns of data
// warpSize drives the row dim to 32
for (int j = 0; j < tile_w; j++) {
// warp does one column at a time
int col = i + j;
if (col < num_slots) {
int64_t addr = cat_data_column_ptrs[col];
T *data = reinterpret_cast<T *>(addr);
if (start_idx < batch_size) {
smem_staging_ptr[threadIdx.x * smem_pitch + j] =
data[start_idx] + dev_slot_offset_ptr[col]; // globally-unique key
}
}
}
__syncthreads();
// write out; broadcast lane 0's row index = first row of this warp's tile
int out_row_idx = __shfl_sync(0xffffffff, start_idx, 0);
// each warp writes out 32 rows and whatever active columns in j(32)
if ((__mylaneid() + i) < num_slots) {
// activate threads
// blockStrided warp over 32 rows write out
int warp_id = threadIdx.x / warpSize;
int smem_row = warp_id * warpSize;
// warpsize doing tile_h
for (int j = 0; j < warpSize; j++) {
if ((j + out_row_idx) < batch_size) {
int curr_out_row = j + out_row_idx; // batch n
// select buffer:, use dev_slot_per_device for row in that and value buffer
int slot_id = (__mylaneid() + i);
int dev_id = slot_id % num_devices; // owning device (round-robin by slot)
int buffer_id = dev_id * num_params + param_id;
int local_id = (slot_id - dev_id) / num_devices; // slot index local to dev_id
int64_t addr = dev_csr_value_ptr[buffer_id]; // dev_csr_value_ptr
T *data = reinterpret_cast<T *>(addr);
uint32_t idx_to_buffers = curr_out_row * dev_slot_per_device[dev_id] + local_id;
data[idx_to_buffers] = smem_staging_ptr[(smem_row + j) * smem_pitch + __mylaneid()];
int64_t addr_row_buf = dev_csr_row_offset_ptr[buffer_id];
T *row_offset = reinterpret_cast<T *>(addr_row_buf);
row_offset[idx_to_buffers] = 1; // single-hot marker, fixed up later
atomicAdd(&(block_atomic_accum[buffer_id]), 1);
}
}
}
__syncthreads();
}
// update global atomic buffer
for (int i = threadIdx.x; i < num_params * num_devices; i += blockDim.x) {
atomicAdd(&(dev_csr_row_offset_counter[i]), block_atomic_accum[i]);
}
}
// Distributed-slot pass 1 of 2: build the CSR row-offset *markers*.
// For distributed slots the owning device is determined by the key value
// (abs(val % num_devices)), not by the slot index. Each (row, slot) cell in
// that device's staging row-offset buffer is set to 1; the host then runs an
// exclusive scan over the staging buffer to produce real CSR row offsets,
// after which pass 2 (the *_csr_val_kernel__ below) writes the values.
// Per-buffer write counts are accumulated block-locally and flushed once to
// dev_csr_row_offset_counter. Shared memory layout matches the local-slot
// kernel: staging tile of T, then num_params*num_devices uint32 counters.
template <typename T>
__global__ void cat_distributed_slot_csr_roffset_kernel__(int64_t *cat_data_column_ptrs,
int num_params, int param_id,
int num_slots, int batch_size,
int num_devices, T *dev_slot_offset_ptr,
int64_t *dev_csr_row_offset_ptr,
uint32_t *dev_csr_row_offset_counter) {
// 8 warps/block
int tile_w = 32; // 32x32 tile
int smem_pitch = 33; // 32 + 1 padding to avoid shared-memory bank conflicts
extern __shared__ char smem_[];
T *smem_staging_ptr = reinterpret_cast<T *>(smem_);
int num_warps = blockDim.x / warpSize;
uint32_t *block_atomic_accum =
reinterpret_cast<uint32_t *>(smem_ + (num_warps * warpSize * smem_pitch * sizeof(T)));
// zero out smem_accum
for (int i = threadIdx.x; i < num_params * num_devices; i += blockDim.x)
block_atomic_accum[i] = 0;
__syncthreads();
int start_idx = threadIdx.x + (blockDim.x * blockIdx.x); // global sample (row) index
for (int i = 0; i < num_slots; i += warpSize) {
// stage 32x32 tile - stage 32 columns of data
// warpSize drives the row dim to 32
for (int j = 0; j < tile_w; j++) {
// warp does one column at a time
int col = i + j;
if (col < num_slots) {
int64_t addr = cat_data_column_ptrs[col];
T *data = reinterpret_cast<T *>(addr);
if (start_idx < batch_size) {
smem_staging_ptr[threadIdx.x * smem_pitch + j] =
data[start_idx] + dev_slot_offset_ptr[col]; // globally-unique key
}
}
}
__syncthreads();
// write out; broadcast lane 0's row index = first row of this warp's tile
int out_row_idx = __shfl_sync(0xffffffff, start_idx, 0);
// each warp writes out 32 rows and whatever active columns in j(32)
if ((__mylaneid() + i) < num_slots) {
// activate threads
// blockStrided warp over 32 rows write out
int warp_id = threadIdx.x / warpSize;
int smem_row = warp_id * warpSize;
// warpsize doing tile_h
for (int j = 0; j < warpSize; j++) {
if ((j + out_row_idx) < batch_size) {
int curr_out_row = j + out_row_idx; // batch n
T val = smem_staging_ptr[(smem_row + j) * smem_pitch + __mylaneid()];
int dev_id = abs(val % num_devices); // device chosen by key value, not slot
int buffer_id = dev_id * num_params + param_id;
int slot_id = (__mylaneid() + i);
// adjust this with prev param count as well
uint32_t idx_to_buffers = curr_out_row * num_slots + slot_id;
// dont need this if using staging for initial idx markers
// idx_to_buffers += param_offset_buf[buffer_id]; // TBD
int64_t addr_row_buf = dev_csr_row_offset_ptr[buffer_id];
T *row_offset = reinterpret_cast<T *>(addr_row_buf);
row_offset[idx_to_buffers] = 1;
// row_offset[idx_to_buffers] = 1 + param_offset_buf[buffer_id];
// for ex scan to get write values in distributed row_offset csr buffers
// mark the trailing sentinel so the exclusive scan yields the final nnz
if ((slot_id == (num_slots - 1)) && (curr_out_row == (batch_size - 1)))
row_offset[idx_to_buffers + 1] = 1;
atomicAdd(&(block_atomic_accum[buffer_id]), 1);
}
}
}
__syncthreads();
}
// update global atomic buffer
for (int i = threadIdx.x; i < num_params * num_devices; i += blockDim.x) {
atomicAdd(&(dev_csr_row_offset_counter[i]), block_atomic_accum[i]);
}
}
// Distributed-slot pass 2 of 2: write CSR values.
// Re-reads the same keys as pass 1, recomputes the owning device from the
// key value, and uses the (already exclusively-scanned) row-offset buffer at
// the same (row, slot) cell as the destination index into that device's CSR
// value buffer. Must run after the host-side exclusive scan of the pass-1
// markers. Dynamic shared memory: staging tile of T only (no counters here).
template <typename T>
__global__ void cat_distributed_slot_csr_val_kernel__(int64_t *cat_data_column_ptrs, int num_params,
int param_id, int num_slots, int batch_size,
int num_devices, T *dev_slot_offset_ptr,
int64_t *dev_csr_row_val_ptr,
int64_t *dev_csr_row_offset_ptr) {
// 8 warps/block
int tile_w = 32; // 32x32 tile
int smem_pitch = 33; // 32 + 1 padding to avoid shared-memory bank conflicts
extern __shared__ char smem_[];
T *smem_staging_ptr = reinterpret_cast<T *>(smem_);
int start_idx = threadIdx.x + (blockDim.x * blockIdx.x); // global sample (row) index
for (int i = 0; i < num_slots; i += warpSize) {
// stage 32x32 tile - stage 32 columns of data
// warpSize drives the row dim to 32
for (int j = 0; j < tile_w; j++) {
// warp does one column at a time
int col = i + j;
if (col < num_slots) {
int64_t addr = cat_data_column_ptrs[col];
T *data = reinterpret_cast<T *>(addr);
if (start_idx < batch_size) {
smem_staging_ptr[threadIdx.x * smem_pitch + j] =
data[start_idx] + dev_slot_offset_ptr[col]; // globally-unique key
}
}
}
__syncthreads();
// write out; broadcast lane 0's row index = first row of this warp's tile
int out_row_idx = __shfl_sync(0xffffffff, start_idx, 0);
// each warp writes out 32 rows and whatever active columns in j(32)
if ((__mylaneid() + i) < num_slots) {
// activate threads
// blockStrided warp over 32 rows write out
int warp_id = threadIdx.x / warpSize;
int smem_row = warp_id * warpSize;
// warpsize doing tile_h
for (int j = 0; j < warpSize; j++) {
if ((j + out_row_idx) < batch_size) {
int curr_out_row = j + out_row_idx; // batch n
T val = smem_staging_ptr[(smem_row + j) * smem_pitch + __mylaneid()];
int dev_id = abs(val % num_devices); // same device mapping as pass 1
int buffer_id = dev_id * num_params + param_id;
int slot_id = (__mylaneid() + i);
uint32_t idx_to_buffers = curr_out_row * num_slots + slot_id;
int64_t addr_row_buf = dev_csr_row_offset_ptr[buffer_id];
T *row_offset = reinterpret_cast<T *>(addr_row_buf);
T idx = row_offset[idx_to_buffers]; // destination from the scanned offsets
int64_t addr_val_buf = dev_csr_row_val_ptr[buffer_id];
T *val_buf = reinterpret_cast<T *>(addr_val_buf);
val_buf[idx] = val;
}
}
}
__syncthreads();
}
}
// Finalizes a localized-slot CSR row-offset buffer in place: every position
// that was marked with 1 by the converter kernel is overwritten with its own
// index (single-hot rows => row_offset[i] == i), and the terminating entry at
// position dev_csr_row_offset_counter[buffer_id] is set the same way.
// One thread per element of the selected buffer; out-of-range threads exit.
template <typename T>
__global__ void check_and_set_csr_row_kernel_(int64_t *dev_csr_row_offset_ptr,
                                              uint32_t *dev_csr_row_offset_counter,
                                              int max_elements_csr_row, int buffer_id) {
  uint32_t gid = blockIdx.x * blockDim.x + threadIdx.x;
  // Device pointer for this buffer travels as an int64_t; recover it.
  T *row_offsets = reinterpret_cast<T *>(dev_csr_row_offset_ptr[buffer_id]);
  if (gid >= (uint32_t)max_elements_csr_row) return;
  // Marked cell or the sentinel slot right past the last written value.
  if (row_offsets[gid] == 1 || gid == dev_csr_row_offset_counter[buffer_id]) {
    row_offsets[gid] = (T)gid;
  }
}
/**
 * Interleave dense (continuous) data parquet columns and write to linear buffers.
 *
 * Stages the per-column device pointers (and the output-buffer pointers)
 * through pinned host memory into small device buffers, then launches
 * dense_data_converter_kernel__ with 256 threads/block (one thread per
 * sample row) on task_stream. All temporary device buffers are parked in
 * rmm_resources so they outlive the asynchronous kernel.
 *
 * @param dense_column_data_ptr vector of device pointers to Parquet columns
 * @param label_dense_dim number of dense values
 * @param batch_size batch size to load
 * @param num_dense_buffers number of dense buffers
 * @param dense_data_buffers vector of device buffers to write output
 * @param dev_ptr_staging pointer to pinned memory for copying pointer address from h2d
 * @param rmm_resources Queue to hold reference to RMM allocations
 * @param mr Device memory resource for RMM allocations
 * @param task_stream Stream to allocate memory and launch kernels
 */
template <typename T>
void convert_parquet_dense_columns(std::vector<T *> &dense_column_data_ptr,
const int label_dense_dim, int batch_size, int num_dense_buffers,
std::vector<rmm::device_buffer> &dense_data_buffers,
int64_t *dev_ptr_staging,
std::deque<rmm::device_buffer> &rmm_resources,
rmm::mr::device_memory_resource *mr, hipStream_t task_stream) {
// tiled load and transpose
// 1) input column pointers: pinned staging -> device buffer
size_t size_of_col_ptrs = dense_column_data_ptr.size() * sizeof(T *);
std::memcpy(dev_ptr_staging, dense_column_data_ptr.data(), size_of_col_ptrs);
rmm_resources.emplace_back(size_of_col_ptrs, task_stream, mr);
rmm::device_buffer &dev_in_column_ptr = rmm_resources.back();
CK_CUDA_THROW_(hipMemcpyAsync(dev_in_column_ptr.data(), dev_ptr_staging, size_of_col_ptrs,
hipMemcpyHostToDevice, task_stream));
// 2) output buffer pointers, staged right after the column pointers
int64_t *pinned_dev_out_buffer =
reinterpret_cast<int64_t *>((size_t)(dev_ptr_staging) + size_of_col_ptrs);
for (unsigned int i = 0; i < dense_data_buffers.size(); i++) {
pinned_dev_out_buffer[i] = (int64_t)dense_data_buffers[i].data();
}
size_t size_of_out_ptrs = dense_data_buffers.size() * sizeof(int64_t);
rmm_resources.emplace_back(size_of_out_ptrs, task_stream, mr);
rmm::device_buffer &dev_out_data_ptr = rmm_resources.back();
CK_CUDA_THROW_(hipMemcpyAsync(dev_out_data_ptr.data(), pinned_dev_out_buffer, size_of_out_ptrs,
hipMemcpyHostToDevice, task_stream));
// assuming 48KB smem/SM
// 32x32 tile per warp -> 4096 bytes/warp
// 12 warps -> 384 threads/block
// size_t smem_size = 48 * 1024 * 1024;
// NOTE(review): block of 256 = 8 warps must match the kernel's static
// __shared__ T[8 * 32 * 33] tile -- keep these in sync.
dim3 block(256, 1, 1);
dim3 grid((batch_size - 1) / block.x + 1, 1, 1);
hipLaunchKernelGGL(( dense_data_converter_kernel__<T>), dim3(grid), dim3(block), 0, task_stream,
(int64_t *)dev_in_column_ptr.data(), label_dense_dim, batch_size, num_dense_buffers,
(int64_t *)dev_out_data_ptr.data());
CK_CUDA_THROW_(hipGetLastError());
return;
}
/**
 * Interleave categorical (slot) data parquet columns and write to CSR buffers.
 *
 * Distributed-slot path: 3 steps -- (1) roffset kernel marks one entry per
 * (sample, slot) in per-device staging buffers, (2) CUB exclusive scan turns
 * the markers into CSR row offsets, (3) val kernel writes keys at the
 * scanned positions. Local-slot path: a single converter kernel writes
 * values and markers, then check_and_set_csr_row_kernel_ rewrites marked
 * row-offset cells with their own index.
 *
 * @param cat_column_data_ptr vector of device pointers to Parquet columns
 * @param num_params number of Embedding params
 * @param param_id param idx for current param
 * @param num_slots number of slots in current param
 * @param batch_size batch size to load
 * @param num_csr_buffers number of csr buffers in csr heap
 * @param num_devices number of gpu devices
 * @param distributed_slot flag to set distributed slot processing
 * @param pid pid of node
 * @param resource_manager ResourceManager handle for session
 * @param csr_value_buffers vector of device buffers to write csr values
 * @param csr_row_offset_buffers vector of device buffers to write csr row offset values
 * @param dev_ptr_staging pointer to pinned memory for copying pointer address from h2d
 * @param dev_embed_param_offset_buf memory to atomically accumulate values written to csr val buf
 * @param dev_slot_offset_ptr device buffer with value for slot value offsets to make unique index
 * @param rmm_resources Queue to hold reference to RMM allocations
 * @param mr Device memory resource for RMM allocations
 * @param task_stream Stream to allocate memory and launch kernels
 * @return number of int64_t entries of dev_ptr_staging consumed by this call
 */
// for nnz =1 csr size_of_value and size_of_row_offset inc will be same
template <typename T>
size_t convert_parquet_cat_columns(std::vector<T *> &cat_column_data_ptr, int num_params,
int param_id, int num_slots, int batch_size, int num_csr_buffers,
int num_devices, bool distributed_slot, int pid,
const std::shared_ptr<ResourceManager> resource_manager,
std::vector<rmm::device_buffer> &csr_value_buffers,
std::vector<rmm::device_buffer> &csr_row_offset_buffers,
int64_t *dev_ptr_staging, uint32_t *dev_embed_param_offset_buf,
T *dev_slot_offset_ptr,
std::deque<rmm::device_buffer> &rmm_resources,
rmm::mr::device_memory_resource *mr,
hipStream_t task_stream) {
size_t pinned_staging_elements_used = 0;
// tiled load and transpose
// input column pointers: pinned staging -> device buffer
size_t size_of_col_ptrs = cat_column_data_ptr.size() * sizeof(int64_t *);
std::memcpy(dev_ptr_staging, cat_column_data_ptr.data(), size_of_col_ptrs);
pinned_staging_elements_used += cat_column_data_ptr.size();
rmm_resources.emplace_back(size_of_col_ptrs, task_stream, mr);
rmm::device_buffer &dev_in_column_ptr = rmm_resources.back();
CK_CUDA_THROW_(hipMemcpyAsync(dev_in_column_ptr.data(), dev_ptr_staging, size_of_col_ptrs,
hipMemcpyHostToDevice, task_stream));
// csr value-buffer pointers
size_t size_of_csr_pointers = num_csr_buffers * sizeof(int64_t);
int64_t *pinned_csr_val_out_buffer =
reinterpret_cast<int64_t *>((size_t)(dev_ptr_staging) + size_of_col_ptrs);
for (int i = 0; i < num_csr_buffers; i++) {
pinned_csr_val_out_buffer[i] = (int64_t)csr_value_buffers[i].data();
}
pinned_staging_elements_used += num_csr_buffers;
rmm_resources.emplace_back(size_of_csr_pointers, task_stream, mr);
rmm::device_buffer &dev_csr_value_ptr = rmm_resources.back();
CK_CUDA_THROW_(hipMemcpyAsync(dev_csr_value_ptr.data(), pinned_csr_val_out_buffer,
size_of_csr_pointers, hipMemcpyHostToDevice, task_stream));
// csr row-offset-buffer pointers
int64_t *pinned_csr_row_offset_buffer = reinterpret_cast<int64_t *>(
(size_t)(dev_ptr_staging) + size_of_col_ptrs + size_of_csr_pointers);
for (int i = 0; i < num_csr_buffers; i++) {
pinned_csr_row_offset_buffer[i] = (int64_t)csr_row_offset_buffers[i].data();
}
pinned_staging_elements_used += num_csr_buffers;
rmm_resources.emplace_back(size_of_csr_pointers, task_stream, mr);
rmm::device_buffer &dev_csr_row_offset_ptr = rmm_resources.back();
CK_CUDA_THROW_(hipMemcpyAsync(dev_csr_row_offset_ptr.data(), pinned_csr_row_offset_buffer,
size_of_csr_pointers, hipMemcpyHostToDevice, task_stream));
if (distributed_slot) {
// per-buffer zeroed staging for the pass-1 row-offset markers
std::vector<rmm::device_buffer> csr_row_offset_staging;
size_t csr_roff_buf_size = (size_t)((num_slots * batch_size + 1) * sizeof(T));
for (int i = 0; i < num_csr_buffers; i++) {
csr_row_offset_staging.emplace_back(csr_roff_buf_size, task_stream, mr);
CK_CUDA_THROW_(
hipMemsetAsync(csr_row_offset_staging.back().data(), 0, csr_roff_buf_size, task_stream));
}
int64_t *pinned_csr_row_offset_staging =
reinterpret_cast<int64_t *>((size_t)pinned_csr_row_offset_buffer + size_of_csr_pointers);
for (int i = 0; i < num_csr_buffers; i++) {
pinned_csr_row_offset_staging[i] = (int64_t)csr_row_offset_staging[i].data();
}
pinned_staging_elements_used += num_csr_buffers;
rmm_resources.emplace_back(size_of_csr_pointers, task_stream, mr);
rmm::device_buffer &dev_csr_row_offset_staging_ptr = rmm_resources.back();
CK_CUDA_THROW_(hipMemcpyAsync(dev_csr_row_offset_staging_ptr.data(),
pinned_csr_row_offset_staging, size_of_csr_pointers,
hipMemcpyHostToDevice, task_stream));
// smaller blocks for 64-bit keys so the smem staging tile fits in 48KB
int block_size = (sizeof(T) == 8) ? 128 : 256;
dim3 block(block_size, 1, 1);
dim3 grid((batch_size - 1) / block.x + 1, 1, 1);
size_t smem_size = (block_size / 32) * sizeof(T) * 32 * 33;
size_t smem_atomic_buffer = num_devices * num_params * sizeof(uint32_t);
smem_size += smem_atomic_buffer;
size_t max_smem_size = 48 * 1024;
if (smem_size > max_smem_size)
CK_THROW_(Error_t::OutOfMemory, "Parquet Converter: Not enough shared memory availble");
// 2 -pass, setup row_offset, prefix_sum, write val to buf idx provided by prefix sum
hipLaunchKernelGGL(( cat_distributed_slot_csr_roffset_kernel__<T>), dim3(grid), dim3(block), smem_size, task_stream,
(int64_t *)dev_in_column_ptr.data(), num_params, param_id, num_slots, batch_size,
num_devices, dev_slot_offset_ptr, (int64_t *)dev_csr_row_offset_staging_ptr.data(),
dev_embed_param_offset_buf);
CK_CUDA_THROW_(hipGetLastError());
// park staging buffers in rmm_resources; device addresses stay valid,
// only ownership moves (stream-ordered lifetime)
for (int i = 0; i < num_csr_buffers; i++) {
rmm_resources.emplace_back(std::move(csr_row_offset_staging.back()));
csr_row_offset_staging.pop_back();
}
// prefix sum
// dont really need to do prefix sum on int64 - check for future
// first call only queries temp-storage size (tmp_storage == NULL)
void *tmp_storage = NULL;
size_t temp_storage_bytes = 0;
int prefix_sum_items = num_slots * batch_size + 1;
CK_CUDA_THROW_(hipcub::DeviceScan::ExclusiveSum(
tmp_storage, temp_storage_bytes, reinterpret_cast<T *>(pinned_csr_row_offset_staging[0]),
reinterpret_cast<T *>(pinned_csr_row_offset_buffer[0]), prefix_sum_items, task_stream));
rmm_resources.emplace_back(temp_storage_bytes, task_stream, mr);
rmm::device_buffer &cub_tmp_storage = rmm_resources.back();
/********************
how to make prefix sum write at correct location??
- you already incremented the staging with running atomic counter in kernel
will exscan sum even work when buffers start rolling in ?
may need inscan for param_id > 0 */
// dont need all that per param csr buffers are different
// exscan on only current param's csr buffers
for (int i = 0; i < num_devices; i++) {
if (pid == resource_manager->get_process_id_from_gpu_global_id(i)) {
int buffer_id = i * num_params + param_id;
CK_CUDA_THROW_(hipcub::DeviceScan::ExclusiveSum(
cub_tmp_storage.data(), temp_storage_bytes,
reinterpret_cast<T *>(pinned_csr_row_offset_staging[buffer_id]),
reinterpret_cast<T *>(pinned_csr_row_offset_buffer[buffer_id]), prefix_sum_items,
task_stream));
} else {
// pinned_csr_row_offset_buffer[x] are init'd to zero - no need to set again
}
}
// kernel to set csr value based on idx generated in prefix scan - everything is single-hot
hipLaunchKernelGGL(( cat_distributed_slot_csr_val_kernel__<T>), dim3(grid), dim3(block), smem_size, task_stream,
(int64_t *)dev_in_column_ptr.data(), num_params, param_id, num_slots, batch_size,
num_devices, dev_slot_offset_ptr, (int64_t *)dev_csr_value_ptr.data(),
(int64_t *)dev_csr_row_offset_ptr.data());
CK_CUDA_THROW_(hipGetLastError());
} else {
int32_t *pinned_slot_per_device =
reinterpret_cast<int32_t *>((size_t)pinned_csr_row_offset_buffer + size_of_csr_pointers);
// localized embedding , generate slot to idx count mappings
for (int i = 0; i < num_devices; i++) pinned_slot_per_device[i] = 0;
for (int i = 0; i < num_slots; i++) {
pinned_slot_per_device[i % num_devices]++;
}
pinned_staging_elements_used += num_devices;
rmm_resources.emplace_back(num_devices * sizeof(int32_t), task_stream, mr);
rmm::device_buffer &dev_slot_per_device_ptr = rmm_resources.back();
CK_CUDA_THROW_(hipMemcpyAsync(dev_slot_per_device_ptr.data(), pinned_slot_per_device,
num_devices * sizeof(int32_t), hipMemcpyHostToDevice,
task_stream));
// smaller blocks for 64-bit keys so the smem staging tile fits in 48KB
int block_size = (sizeof(T) == 8) ? 128 : 256;
dim3 block(block_size, 1, 1);
dim3 grid((batch_size - 1) / block.x + 1, 1, 1);
size_t smem_size = (block_size / 32) * sizeof(T) * 32 * 33;
size_t smem_atomic_buffer = num_devices * num_params * sizeof(uint32_t);
smem_size += smem_atomic_buffer;
size_t max_smem_size = 48 * 1024;
if (smem_size > max_smem_size)
CK_THROW_(Error_t::OutOfMemory, "Parquet Converter: Not enough shared memory availble");
hipLaunchKernelGGL(( cat_local_slot_converter_kernel__<T>), dim3(grid), dim3(block), smem_size, task_stream,
(int64_t *)dev_in_column_ptr.data(), num_params, param_id, num_slots, batch_size,
num_devices, (int32_t *)dev_slot_per_device_ptr.data(), dev_slot_offset_ptr,
(int64_t *)dev_csr_value_ptr.data(), (int64_t *)dev_csr_row_offset_ptr.data(),
dev_embed_param_offset_buf);
CK_CUDA_THROW_(hipGetLastError());
// csr_row_offset col val = idx
// everything is single-hot , single-param for now
// future - take-in atomic_offset_counter from last fn call to start at right offset of
// csr_row_offset_buf same offset goes to converter_kernel as well for both value, row_offset
// buffer
int max_elements_csr_row = num_slots * batch_size + 1;
dim3 block_2(1024, 1, 1);
dim3 grid_2((max_elements_csr_row - 1) / block_2.x + 1, 1, 1);
for (int device = 0; device < num_devices; device++) {
if (pid == resource_manager->get_process_id_from_gpu_global_id(device)) {
int buf_id = device * num_params + param_id;
hipLaunchKernelGGL(( check_and_set_csr_row_kernel_<T>), dim3(grid_2), dim3(block_2), 0, task_stream,
(int64_t *)dev_csr_row_offset_ptr.data(), dev_embed_param_offset_buf,
max_elements_csr_row, buf_id);
}
}
}
CK_CUDA_THROW_(hipGetLastError());
return pinned_staging_elements_used;
}
// Explicit template instantiations: dense converter for float, categorical
// converter for both supported key types (long long / unsigned int), so the
// definitions in this TU link against out-of-line callers.
template void convert_parquet_dense_columns<float>(
std::vector<float *> &dense_column_data_ptr, const int label_dense_dim, int batch_size,
int num_dense_buffers, std::vector<rmm::device_buffer> &dense_data_buffers,
int64_t *dev_ptr_staging, std::deque<rmm::device_buffer> &rmm_resources,
rmm::mr::device_memory_resource *mr, hipStream_t task_stream);
template size_t convert_parquet_cat_columns<long long int>(
std::vector<long long int *> &cat_column_data_ptr, int num_params, int param_id, int num_slots,
int batch_size, int num_csr_buffers, int num_devices, bool distributed_slot, int pid,
const std::shared_ptr<ResourceManager> resource_manager,
std::vector<rmm::device_buffer> &csr_value_buffers,
std::vector<rmm::device_buffer> &csr_row_offset_buffers, int64_t *dev_ptr_staging,
uint32_t *dev_embed_param_offset_buf, long long *dev_slot_offset_ptr,
std::deque<rmm::device_buffer> &rmm_resources, rmm::mr::device_memory_resource *mr,
hipStream_t task_stream);
template size_t convert_parquet_cat_columns<unsigned int>(
std::vector<unsigned int *> &cat_column_data_ptr, int num_params, int param_id, int num_slots,
int batch_size, int num_csr_buffers, int num_devices, bool distributed_slot, int pid,
const std::shared_ptr<ResourceManager> resource_manager,
std::vector<rmm::device_buffer> &csr_value_buffers,
std::vector<rmm::device_buffer> &csr_row_offset_buffers, int64_t *dev_ptr_staging,
uint32_t *dev_embed_param_offset_buf, unsigned int *dev_slot_offset_ptr,
std::deque<rmm::device_buffer> &rmm_resources, rmm::mr::device_memory_resource *mr,
hipStream_t task_stream);
} // namespace HugeCTR
| 77400ba42d883656c84cdc14737a931caa3022b5.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inttypes.h>
#include <cstring>
#include <cub/cub.cuh>
#include <deque>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <vector>
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/resource_manager.hpp"
namespace HugeCTR {
// Returns the calling thread's lane index (0..warpSize-1) within its warp.
// Fix: '%' must be escaped as '%%' inside an inline-asm template string so
// the literal special-register name %laneid reaches the assembler; a single
// '%l...' is treated as an (invalid) operand reference by the compiler.
__device__ __forceinline__ unsigned int __mylaneid() {
unsigned int laneid;
asm volatile("mov.u32 %0, %%laneid;" : "=r"(laneid));
return laneid;
}
// Identity overload: lets the kernels below call abs(val % num_devices)
// uniformly; it is a no-op when T is unsigned (the modulo is already >= 0).
__device__ __forceinline__ unsigned int abs(unsigned int x) { return x; }
// Interleaves per-column dense/label data (one device pointer per column in
// dense_data_column_ptrs, passed as int64_t) into row-major per-sample
// records, split evenly across num_dense_buffers output buffers.
// Layout: one thread per sample row; each warp stages a 32x32 (row, column)
// tile in shared memory, then writes it back transposed (lane -> column,
// inner loop j -> row).
// NOTE(review): the static shared tile assumes blockDim.x == 256 (8 warps);
// the host wrapper launches with exactly that block size -- confirm before
// changing the launch configuration.
template <typename T>
__global__ void dense_data_converter_kernel__(int64_t *dense_data_column_ptrs,
const int label_dense_dim, int batch_size,
int num_dense_buffers, int64_t *dense_data_out_ptrs) {
// extern __shared__ char smem_[];
// 8 warps/block
int tile_w = 32; // 32x32 tile
int smem_pitch = 33; // 32 + 1 padding column to avoid shared-memory bank conflicts
__shared__ T smem_staging_ptr[8 * 32 * 33];
int start_idx = threadIdx.x + (blockDim.x * blockIdx.x); // global sample (row) index
// outer loop on label_dense_dim
for (int i = 0; i < label_dense_dim; i += warpSize) {
// stage 32x32 tile - stage 32 columns of data
// warpSize drives the row dim to 32
for (int j = 0; j < tile_w; j++) {
// warp does one column at a time
int col = i + j;
if (col < label_dense_dim) {
int64_t addr = dense_data_column_ptrs[col]; // device pointer smuggled as int64_t
T *data = reinterpret_cast<T *>(addr);
if (start_idx < batch_size) {
smem_staging_ptr[threadIdx.x * smem_pitch + j] = data[start_idx];
}
}
}
__syncthreads();
// write out; broadcast lane 0's row index = first row of this warp's tile
int out_row_idx = __shfl_sync(0xffffffff, start_idx, 0);
// each warp writes out 32 rows and whatever active columns in j(32)
if ((__mylaneid() + i) < label_dense_dim) {
// activate threads
// blockStrided warp over 32 rows write out
int warp_id = threadIdx.x / warpSize;
int smem_row = warp_id * warpSize;
// warpsize doing tile_h
for (int j = 0; j < warpSize; j++) {
if ((j + out_row_idx) < batch_size) {
int curr_out_row = j + out_row_idx;
// samples are block-partitioned: batch_size/num_dense_buffers rows per buffer
int buffer_id = curr_out_row / (batch_size / num_dense_buffers);
int local_id = curr_out_row % (batch_size / num_dense_buffers);
int64_t addr = dense_data_out_ptrs[buffer_id];
T *data = reinterpret_cast<T *>(addr);
data[(local_id * label_dense_dim) + __mylaneid() + i] =
smem_staging_ptr[(smem_row + j) * smem_pitch + __mylaneid()];
}
}
}
__syncthreads();
}
}
// Localized-slot converter: transposes categorical slot columns into
// per-device CSR value buffers. Slot s is owned by device (s % num_devices);
// its value lands in value buffer (dev_id * num_params + param_id).
// dev_slot_offset_ptr[col] is added to each key to make per-slot key ranges
// globally unique. Each written position also marks its row-offset entry
// with 1 (single-hot); per-buffer write counts are accumulated block-locally
// in shared memory and flushed once to dev_csr_row_offset_counter.
// Dynamic shared memory layout: (blockDim.x/warpSize)*32*33 T staging tile,
// followed by num_params*num_devices uint32 block counters.
template <typename T>
__global__ void cat_local_slot_converter_kernel__(int64_t *cat_data_column_ptrs, int num_params,
int param_id, int num_slots, int batch_size,
int num_devices, int32_t *dev_slot_per_device,
T *dev_slot_offset_ptr,
int64_t *dev_csr_value_ptr,
int64_t *dev_csr_row_offset_ptr,
uint32_t *dev_csr_row_offset_counter) {
// 8 warps/block
int tile_w = 32; // 32x32 tile
int smem_pitch = 33; // 32 + 1 padding to avoid shared-memory bank conflicts
extern __shared__ char smem_[];
T *smem_staging_ptr = reinterpret_cast<T *>(smem_);
int num_warps = blockDim.x / warpSize;
uint32_t *block_atomic_accum =
reinterpret_cast<uint32_t *>(smem_ + (num_warps * warpSize * smem_pitch * sizeof(T)));
// zero out smem_accum
for (int i = threadIdx.x; i < num_params * num_devices; i += blockDim.x)
block_atomic_accum[i] = 0;
__syncthreads();
int start_idx = threadIdx.x + (blockDim.x * blockIdx.x); // global sample (row) index
for (int i = 0; i < num_slots; i += warpSize) {
// stage 32x32 tile - stage 32 columns of data
// warpSize drives the row dim to 32
for (int j = 0; j < tile_w; j++) {
// warp does one column at a time
int col = i + j;
if (col < num_slots) {
int64_t addr = cat_data_column_ptrs[col];
T *data = reinterpret_cast<T *>(addr);
if (start_idx < batch_size) {
smem_staging_ptr[threadIdx.x * smem_pitch + j] =
// Pass 1 of the *distributed*-slot conversion: for every (sample, slot)
// pair, mark the destination row-offset staging buffer of the owning
// device with 1 and count values per CSR buffer. The owning device is
// chosen by the category VALUE (val % num_devices), not by the slot
// index. An exclusive scan over these 1-markers (done by the host
// wrapper) later yields the CSR row offsets consumed by pass 2
// (cat_distributed_slot_csr_val_kernel__).
// NOTE(review): assumes blockDim.x is a multiple of warpSize — confirm at
// the launch site.
template <typename T>
__global__ void cat_distributed_slot_csr_roffset_kernel__(int64_t *cat_data_column_ptrs,
                                                          int num_params, int param_id,
                                                          int num_slots, int batch_size,
                                                          int num_devices, T *dev_slot_offset_ptr,
                                                          int64_t *dev_csr_row_offset_ptr,
                                                          uint32_t *dev_csr_row_offset_counter) {
  // 8 warps/block
  int tile_w = 32;      // 32x32 tile
  int smem_pitch = 33;  // 32 + 1 padding column to avoid bank conflicts
  extern __shared__ char smem_[];
  T *smem_staging_ptr = reinterpret_cast<T *>(smem_);
  int num_warps = blockDim.x / warpSize;
  uint32_t *block_atomic_accum =
      reinterpret_cast<uint32_t *>(smem_ + (num_warps * warpSize * smem_pitch * sizeof(T)));
  // zero out smem_accum
  for (int i = threadIdx.x; i < num_params * num_devices; i += blockDim.x)
    block_atomic_accum[i] = 0;
  __syncthreads();
  int start_idx = threadIdx.x + (blockDim.x * blockIdx.x);
  for (int i = 0; i < num_slots; i += warpSize) {
    // stage 32x32 tile - stage 32 columns of data
    // warpSize drives the row dim to 32
    for (int j = 0; j < tile_w; j++) {
      // warp does one column at a time
      int col = i + j;
      if (col < num_slots) {
        int64_t addr = cat_data_column_ptrs[col];
        T *data = reinterpret_cast<T *>(addr);
        if (start_idx < batch_size) {
          smem_staging_ptr[threadIdx.x * smem_pitch + j] =
              data[start_idx] + dev_slot_offset_ptr[col];
        }
      }
    }
    __syncthreads();
    // write out
    int out_row_idx = __shfl_sync(0xffffffff, start_idx, 0);
    // each warp writes out 32 rows and whatever active columns in j(32)
    if ((__mylaneid() + i) < num_slots) {
      // activate threads
      // blockStrided warp over 32 rows write out
      int warp_id = threadIdx.x / warpSize;
      int smem_row = warp_id * warpSize;
      // warpsize doing tile_h
      for (int j = 0; j < warpSize; j++) {
        if ((j + out_row_idx) < batch_size) {
          int curr_out_row = j + out_row_idx;  // batch n
          T val = smem_staging_ptr[(smem_row + j) * smem_pitch + __mylaneid()];
          // abs() guards against negative category values producing a
          // negative device id — NOTE(review): presumably values are
          // non-negative after the slot offset; confirm upstream.
          int dev_id = abs(val % num_devices);
          int buffer_id = dev_id * num_params + param_id;
          int slot_id = (__mylaneid() + i);
          // adjust this with prev param count as well
          uint32_t idx_to_buffers = curr_out_row * num_slots + slot_id;
          // dont need this if using staging for initial idx markers
          // idx_to_buffers += param_offset_buf[buffer_id]; // TBD
          int64_t addr_row_buf = dev_csr_row_offset_ptr[buffer_id];
          T *row_offset = reinterpret_cast<T *>(addr_row_buf);
          row_offset[idx_to_buffers] = 1;
          // row_offset[idx_to_buffers] = 1 + param_offset_buf[buffer_id];
          // for ex scan to get write values in distributed row_offset csr buffers
          // Last (sample, slot) pair also marks the terminating CSR entry.
          if ((slot_id == (num_slots - 1)) && (curr_out_row == (batch_size - 1)))
            row_offset[idx_to_buffers + 1] = 1;
          atomicAdd(&(block_atomic_accum[buffer_id]), 1);
        }
      }
    }
    __syncthreads();
  }
  // update global atomic buffer
  for (int i = threadIdx.x; i < num_params * num_devices; i += blockDim.x) {
    atomicAdd(&(dev_csr_row_offset_counter[i]), block_atomic_accum[i]);
  }
}
// Pass 2 of the *distributed*-slot conversion: re-reads the same
// (sample, slot) values as pass 1, looks up the write position produced
// by the exclusive scan of the row-offset markers, and stores each value
// into the owning device's CSR value buffer. Must be launched with the
// same grid/block configuration as pass 1 so the staging/traversal order
// matches.
template <typename T>
__global__ void cat_distributed_slot_csr_val_kernel__(int64_t *cat_data_column_ptrs, int num_params,
                                                      int param_id, int num_slots, int batch_size,
                                                      int num_devices, T *dev_slot_offset_ptr,
                                                      int64_t *dev_csr_row_val_ptr,
                                                      int64_t *dev_csr_row_offset_ptr) {
  // 8 warps/block
  int tile_w = 32;      // 32x32 tile
  int smem_pitch = 33;  // 32 + 1 padding column to avoid bank conflicts
  extern __shared__ char smem_[];
  T *smem_staging_ptr = reinterpret_cast<T *>(smem_);
  int start_idx = threadIdx.x + (blockDim.x * blockIdx.x);
  for (int i = 0; i < num_slots; i += warpSize) {
    // stage 32x32 tile - stage 32 columns of data
    // warpSize drives the row dim to 32
    for (int j = 0; j < tile_w; j++) {
      // warp does one column at a time
      int col = i + j;
      if (col < num_slots) {
        int64_t addr = cat_data_column_ptrs[col];
        T *data = reinterpret_cast<T *>(addr);
        if (start_idx < batch_size) {
          smem_staging_ptr[threadIdx.x * smem_pitch + j] =
              data[start_idx] + dev_slot_offset_ptr[col];
        }
      }
    }
    __syncthreads();
    // write out
    int out_row_idx = __shfl_sync(0xffffffff, start_idx, 0);
    // each warp writes out 32 rows and whatever active columns in j(32)
    if ((__mylaneid() + i) < num_slots) {
      // activate threads
      // blockStrided warp over 32 rows write out
      int warp_id = threadIdx.x / warpSize;
      int smem_row = warp_id * warpSize;
      // warpsize doing tile_h
      for (int j = 0; j < warpSize; j++) {
        if ((j + out_row_idx) < batch_size) {
          int curr_out_row = j + out_row_idx;  // batch n
          T val = smem_staging_ptr[(smem_row + j) * smem_pitch + __mylaneid()];
          // Same device selection as pass 1: owner chosen by value.
          int dev_id = abs(val % num_devices);
          int buffer_id = dev_id * num_params + param_id;
          int slot_id = (__mylaneid() + i);
          uint32_t idx_to_buffers = curr_out_row * num_slots + slot_id;
          // The scanned row-offset buffer now holds the destination index
          // for this value inside the CSR value buffer.
          int64_t addr_row_buf = dev_csr_row_offset_ptr[buffer_id];
          T *row_offset = reinterpret_cast<T *>(addr_row_buf);
          T idx = row_offset[idx_to_buffers];
          int64_t addr_val_buf = dev_csr_row_val_ptr[buffer_id];
          T *val_buf = reinterpret_cast<T *>(addr_val_buf);
          val_buf[idx] = val;
        }
      }
    }
    __syncthreads();
  }
}
// Rewrites the single-hot CSR row-offset markers into real offsets: every
// slot previously marked with 1 by the converter kernel — plus the
// terminating slot, whose index equals the element count recorded for
// this buffer — is overwritten with its own linear index.
template <typename T>
__global__ void check_and_set_csr_row_kernel_(int64_t *dev_csr_row_offset_ptr,
                                              uint32_t *dev_csr_row_offset_counter,
                                              int max_elements_csr_row, int buffer_id) {
  const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= static_cast<uint32_t>(max_elements_csr_row)) return;
  T *data = reinterpret_cast<T *>(dev_csr_row_offset_ptr[buffer_id]);
  const bool marked = (data[idx] == 1);
  const bool is_terminator = (idx == dev_csr_row_offset_counter[buffer_id]);
  if (marked || is_terminator) {
    data[idx] = (T)idx;
  }
}
/**
* Interleave dense (continuous) data parquet columns and write to linear buffer
* @param dense_column_data_ptr vector of device pointers to Parquet columns
* @param label_dense_dim number of dense values
* @param batch_size batch size to load
* @param num_dense_buffers number of dense buffers
* @param dense_data_buffers vector of device buffers to write output
* @param dev_ptr_staging pointer to pinned memory for copying pointer address from h2d
* @param rmm_resources Queue to hold reference to RMM allocations
* @param mr Device memory resource for RMM allocations
* @param task_stream Stream to allocate memory and launch kerenels
*/
template <typename T>
void convert_parquet_dense_columns(std::vector<T *> &dense_column_data_ptr,
                                   const int label_dense_dim, int batch_size, int num_dense_buffers,
                                   std::vector<rmm::device_buffer> &dense_data_buffers,
                                   int64_t *dev_ptr_staging,
                                   std::deque<rmm::device_buffer> &rmm_resources,
                                   rmm::mr::device_memory_resource *mr, cudaStream_t task_stream) {
  // tiled load and transpose
  // Stage the per-column device pointers through pinned memory and copy
  // them to the device so the kernel can index columns directly.
  size_t size_of_col_ptrs = dense_column_data_ptr.size() * sizeof(T *);
  std::memcpy(dev_ptr_staging, dense_column_data_ptr.data(), size_of_col_ptrs);
  rmm_resources.emplace_back(size_of_col_ptrs, task_stream, mr);
  rmm::device_buffer &dev_in_column_ptr = rmm_resources.back();
  CK_CUDA_THROW_(cudaMemcpyAsync(dev_in_column_ptr.data(), dev_ptr_staging, size_of_col_ptrs,
                                 cudaMemcpyHostToDevice, task_stream));
  // Output-buffer pointers go into the same pinned staging area, right
  // after the column pointers; the caller must size dev_ptr_staging for both.
  int64_t *pinned_dev_out_buffer =
      reinterpret_cast<int64_t *>((size_t)(dev_ptr_staging) + size_of_col_ptrs);
  for (unsigned int i = 0; i < dense_data_buffers.size(); i++) {
    pinned_dev_out_buffer[i] = (int64_t)dense_data_buffers[i].data();
  }
  size_t size_of_out_ptrs = dense_data_buffers.size() * sizeof(int64_t);
  rmm_resources.emplace_back(size_of_out_ptrs, task_stream, mr);
  rmm::device_buffer &dev_out_data_ptr = rmm_resources.back();
  CK_CUDA_THROW_(cudaMemcpyAsync(dev_out_data_ptr.data(), pinned_dev_out_buffer, size_of_out_ptrs,
                                 cudaMemcpyHostToDevice, task_stream));
  // assuming 48KB smem/SM
  // 32x32 tile per warp -> 4096 bytes/warp
  // 12 warps -> 384 threads/block
  // size_t smem_size = 48 * 1024 * 1024;
  // One thread per batch row; ceil-divide so the tail batch is covered.
  dim3 block(256, 1, 1);
  dim3 grid((batch_size - 1) / block.x + 1, 1, 1);
  dense_data_converter_kernel__<T><<<grid, block, 0, task_stream>>>(
      (int64_t *)dev_in_column_ptr.data(), label_dense_dim, batch_size, num_dense_buffers,
      (int64_t *)dev_out_data_ptr.data());
  CK_CUDA_THROW_(cudaGetLastError());
  return;
}
/**
* Interleave categoricals (slot) data parquet columns and write to csr buffers
* @param cat_column_data_ptr vector of device pointers to Parquet columns
* @param num_params number of Embedding params
* @param param_id param idx for current param
* @param num_slots number of slots in current param
* @param batch_size batch size to load
* @param num_csr_buffers number of csr buffers in csr heap
* @param num_devices number of gpu devices
* @param distributed_slot flag to set distributed slot processing
* @param pid pid of node
* @param resource_manager ResourceManager handle for session
* @param csr_value_buffers vector of device buffers to write csr values
* @param csr_row_offset_buffers vector of device buffers to write csr row offset values
* @param dev_ptr_staging pointer to pinned memory for copying pointer address from h2d
* @param dev_embed_param_offset_buf memory to atomically accumulate values written to csr val buf
* @param dev_slot_offset_ptr device buffer with value for slot value offsets to make unique index
* @param rmm_resources Queue to hold reference to RMM allocations
* @param mr Device memory resource for RMM allocations
* @param task_stream Stream to allocate memory and launch kerenels
*/
// for nnz =1 csr size_of_value and size_of_row_offset inc will be same
// Returns the number of int64-sized pinned staging slots consumed, so the
// caller can advance its staging cursor for subsequent params.
template <typename T>
size_t convert_parquet_cat_columns(std::vector<T *> &cat_column_data_ptr, int num_params,
                                   int param_id, int num_slots, int batch_size, int num_csr_buffers,
                                   int num_devices, bool distributed_slot, int pid,
                                   const std::shared_ptr<ResourceManager> resource_manager,
                                   std::vector<rmm::device_buffer> &csr_value_buffers,
                                   std::vector<rmm::device_buffer> &csr_row_offset_buffers,
                                   int64_t *dev_ptr_staging, uint32_t *dev_embed_param_offset_buf,
                                   T *dev_slot_offset_ptr,
                                   std::deque<rmm::device_buffer> &rmm_resources,
                                   rmm::mr::device_memory_resource *mr,
                                   cudaStream_t task_stream) {
  size_t pinned_staging_elements_used = 0;
  // tiled load and transpose
  // Stage per-slot column pointers through pinned memory -> device.
  size_t size_of_col_ptrs = cat_column_data_ptr.size() * sizeof(int64_t *);
  std::memcpy(dev_ptr_staging, cat_column_data_ptr.data(), size_of_col_ptrs);
  pinned_staging_elements_used += cat_column_data_ptr.size();
  rmm_resources.emplace_back(size_of_col_ptrs, task_stream, mr);
  rmm::device_buffer &dev_in_column_ptr = rmm_resources.back();
  CK_CUDA_THROW_(cudaMemcpyAsync(dev_in_column_ptr.data(), dev_ptr_staging, size_of_col_ptrs,
                                 cudaMemcpyHostToDevice, task_stream));
  // CSR value-buffer pointers follow the column pointers in the pinned area.
  size_t size_of_csr_pointers = num_csr_buffers * sizeof(int64_t);
  int64_t *pinned_csr_val_out_buffer =
      reinterpret_cast<int64_t *>((size_t)(dev_ptr_staging) + size_of_col_ptrs);
  for (int i = 0; i < num_csr_buffers; i++) {
    pinned_csr_val_out_buffer[i] = (int64_t)csr_value_buffers[i].data();
  }
  pinned_staging_elements_used += num_csr_buffers;
  rmm_resources.emplace_back(size_of_csr_pointers, task_stream, mr);
  rmm::device_buffer &dev_csr_value_ptr = rmm_resources.back();
  CK_CUDA_THROW_(cudaMemcpyAsync(dev_csr_value_ptr.data(), pinned_csr_val_out_buffer,
                                 size_of_csr_pointers, cudaMemcpyHostToDevice, task_stream));
  // CSR row-offset-buffer pointers follow the value-buffer pointers.
  int64_t *pinned_csr_row_offset_buffer = reinterpret_cast<int64_t *>(
      (size_t)(dev_ptr_staging) + size_of_col_ptrs + size_of_csr_pointers);
  for (int i = 0; i < num_csr_buffers; i++) {
    pinned_csr_row_offset_buffer[i] = (int64_t)csr_row_offset_buffers[i].data();
  }
  pinned_staging_elements_used += num_csr_buffers;
  rmm_resources.emplace_back(size_of_csr_pointers, task_stream, mr);
  rmm::device_buffer &dev_csr_row_offset_ptr = rmm_resources.back();
  CK_CUDA_THROW_(cudaMemcpyAsync(dev_csr_row_offset_ptr.data(), pinned_csr_row_offset_buffer,
                                 size_of_csr_pointers, cudaMemcpyHostToDevice, task_stream));
  if (distributed_slot) {
    // Distributed slots need zeroed staging buffers to mark row-offset
    // positions before the exclusive scan.
    std::vector<rmm::device_buffer> csr_row_offset_staging;
    size_t csr_roff_buf_size = (size_t)((num_slots * batch_size + 1) * sizeof(T));
    for (int i = 0; i < num_csr_buffers; i++) {
      csr_row_offset_staging.emplace_back(csr_roff_buf_size, task_stream, mr);
      CK_CUDA_THROW_(
          cudaMemsetAsync(csr_row_offset_staging.back().data(), 0, csr_roff_buf_size, task_stream));
    }
    int64_t *pinned_csr_row_offset_staging =
        reinterpret_cast<int64_t *>((size_t)pinned_csr_row_offset_buffer + size_of_csr_pointers);
    for (int i = 0; i < num_csr_buffers; i++) {
      pinned_csr_row_offset_staging[i] = (int64_t)csr_row_offset_staging[i].data();
    }
    pinned_staging_elements_used += num_csr_buffers;
    rmm_resources.emplace_back(size_of_csr_pointers, task_stream, mr);
    rmm::device_buffer &dev_csr_row_offset_staging_ptr = rmm_resources.back();
    CK_CUDA_THROW_(cudaMemcpyAsync(dev_csr_row_offset_staging_ptr.data(),
                                   pinned_csr_row_offset_staging, size_of_csr_pointers,
                                   cudaMemcpyHostToDevice, task_stream));
    // Smaller blocks for 8-byte T so the 32x33 smem tile fits under 48KB.
    int block_size = (sizeof(T) == 8) ? 128 : 256;
    dim3 block(block_size, 1, 1);
    dim3 grid((batch_size - 1) / block.x + 1, 1, 1);
    size_t smem_size = (block_size / 32) * sizeof(T) * 32 * 33;
    size_t smem_atomic_buffer = num_devices * num_params * sizeof(uint32_t);
    smem_size += smem_atomic_buffer;
    size_t max_smem_size = 48 * 1024;
    if (smem_size > max_smem_size)
      CK_THROW_(Error_t::OutOfMemory, "Parquet Converter: Not enough shared memory availble");
    // 2 -pass, setup row_offset, prefix_sum, write val to buf idx provided by prefix sum
    cat_distributed_slot_csr_roffset_kernel__<T><<<grid, block, smem_size, task_stream>>>(
        (int64_t *)dev_in_column_ptr.data(), num_params, param_id, num_slots, batch_size,
        num_devices, dev_slot_offset_ptr, (int64_t *)dev_csr_row_offset_staging_ptr.data(),
        dev_embed_param_offset_buf);
    CK_CUDA_THROW_(cudaGetLastError());
    // Hand ownership of the staging buffers to rmm_resources; the pinned
    // pointers captured above remain valid because move preserves the
    // underlying device allocation.
    for (int i = 0; i < num_csr_buffers; i++) {
      rmm_resources.emplace_back(std::move(csr_row_offset_staging.back()));
      csr_row_offset_staging.pop_back();
    }
    // prefix sum
    // dont really need to do prefix sum on int64 - check for future
    // First call with NULL storage only queries the temp-storage size
    // (standard CUB two-phase pattern).
    void *tmp_storage = NULL;
    size_t temp_storage_bytes = 0;
    int prefix_sum_items = num_slots * batch_size + 1;
    CK_CUDA_THROW_(cub::DeviceScan::ExclusiveSum(
        tmp_storage, temp_storage_bytes, reinterpret_cast<T *>(pinned_csr_row_offset_staging[0]),
        reinterpret_cast<T *>(pinned_csr_row_offset_buffer[0]), prefix_sum_items, task_stream));
    rmm_resources.emplace_back(temp_storage_bytes, task_stream, mr);
    rmm::device_buffer &cub_tmp_storage = rmm_resources.back();
    /********************
      how to make prefix sum write at correct location??
      - you already incremented the staging with running atomic counter in kernel
      will exscan sum even work when buffers start rolling in ?
      may need inscan for param_id > 0 */
    // dont need all that per param csr buffers are different
    // exscan on only current param's csr buffers
    for (int i = 0; i < num_devices; i++) {
      if (pid == resource_manager->get_process_id_from_gpu_global_id(i)) {
        int buffer_id = i * num_params + param_id;
        CK_CUDA_THROW_(cub::DeviceScan::ExclusiveSum(
            cub_tmp_storage.data(), temp_storage_bytes,
            reinterpret_cast<T *>(pinned_csr_row_offset_staging[buffer_id]),
            reinterpret_cast<T *>(pinned_csr_row_offset_buffer[buffer_id]), prefix_sum_items,
            task_stream));
      } else {
        // pinned_csr_row_offset_buffer[x] are init'd to zero - no need to set again
      }
    }
    // kernel to set csr value based on idx generated in prefix scan - everything is single-hot
    cat_distributed_slot_csr_val_kernel__<T><<<grid, block, smem_size, task_stream>>>(
        (int64_t *)dev_in_column_ptr.data(), num_params, param_id, num_slots, batch_size,
        num_devices, dev_slot_offset_ptr, (int64_t *)dev_csr_value_ptr.data(),
        (int64_t *)dev_csr_row_offset_ptr.data());
    CK_CUDA_THROW_(cudaGetLastError());
  } else {
    int32_t *pinned_slot_per_device =
        reinterpret_cast<int32_t *>((size_t)pinned_csr_row_offset_buffer + size_of_csr_pointers);
    // localized embedding , generate slot to idx count mappings
    for (int i = 0; i < num_devices; i++) pinned_slot_per_device[i] = 0;
    for (int i = 0; i < num_slots; i++) {
      pinned_slot_per_device[i % num_devices]++;
    }
    pinned_staging_elements_used += num_devices;
    rmm_resources.emplace_back(num_devices * sizeof(int32_t), task_stream, mr);
    rmm::device_buffer &dev_slot_per_device_ptr = rmm_resources.back();
    CK_CUDA_THROW_(cudaMemcpyAsync(dev_slot_per_device_ptr.data(), pinned_slot_per_device,
                                   num_devices * sizeof(int32_t), cudaMemcpyHostToDevice,
                                   task_stream));
    int block_size = (sizeof(T) == 8) ? 128 : 256;
    dim3 block(block_size, 1, 1);
    dim3 grid((batch_size - 1) / block.x + 1, 1, 1);
    size_t smem_size = (block_size / 32) * sizeof(T) * 32 * 33;
    size_t smem_atomic_buffer = num_devices * num_params * sizeof(uint32_t);
    smem_size += smem_atomic_buffer;
    size_t max_smem_size = 48 * 1024;
    if (smem_size > max_smem_size)
      CK_THROW_(Error_t::OutOfMemory, "Parquet Converter: Not enough shared memory availble");
    cat_local_slot_converter_kernel__<T><<<grid, block, smem_size, task_stream>>>(
        (int64_t *)dev_in_column_ptr.data(), num_params, param_id, num_slots, batch_size,
        num_devices, (int32_t *)dev_slot_per_device_ptr.data(), dev_slot_offset_ptr,
        (int64_t *)dev_csr_value_ptr.data(), (int64_t *)dev_csr_row_offset_ptr.data(),
        dev_embed_param_offset_buf);
    CK_CUDA_THROW_(cudaGetLastError());
    // csr_row_offset col val = idx
    // everything is single-hot , single-param for now
    // future - take-in atomic_offset_counter from last fn call to start at right offset of
    // csr_row_offset_buf same offset goes to converter_kernel as well for both value, row_offset
    // buffer
    int max_elements_csr_row = num_slots * batch_size + 1;
    dim3 block_2(1024, 1, 1);
    dim3 grid_2((max_elements_csr_row - 1) / block_2.x + 1, 1, 1);
    for (int device = 0; device < num_devices; device++) {
      if (pid == resource_manager->get_process_id_from_gpu_global_id(device)) {
        int buf_id = device * num_params + param_id;
        check_and_set_csr_row_kernel_<T><<<grid_2, block_2, 0, task_stream>>>(
            (int64_t *)dev_csr_row_offset_ptr.data(), dev_embed_param_offset_buf,
            max_elements_csr_row, buf_id);
      }
    }
  }
  CK_CUDA_THROW_(cudaGetLastError());
  return pinned_staging_elements_used;
}
// init function instances here
// Explicit template instantiations: dense data is always float; categorical
// key types cover the two supported embedding key widths.
template void convert_parquet_dense_columns<float>(
    std::vector<float *> &dense_column_data_ptr, const int label_dense_dim, int batch_size,
    int num_dense_buffers, std::vector<rmm::device_buffer> &dense_data_buffers,
    int64_t *dev_ptr_staging, std::deque<rmm::device_buffer> &rmm_resources,
    rmm::mr::device_memory_resource *mr, cudaStream_t task_stream);
template size_t convert_parquet_cat_columns<long long int>(
    std::vector<long long int *> &cat_column_data_ptr, int num_params, int param_id, int num_slots,
    int batch_size, int num_csr_buffers, int num_devices, bool distributed_slot, int pid,
    const std::shared_ptr<ResourceManager> resource_manager,
    std::vector<rmm::device_buffer> &csr_value_buffers,
    std::vector<rmm::device_buffer> &csr_row_offset_buffers, int64_t *dev_ptr_staging,
    uint32_t *dev_embed_param_offset_buf, long long *dev_slot_offset_ptr,
    std::deque<rmm::device_buffer> &rmm_resources, rmm::mr::device_memory_resource *mr,
    cudaStream_t task_stream);
template size_t convert_parquet_cat_columns<unsigned int>(
    std::vector<unsigned int *> &cat_column_data_ptr, int num_params, int param_id, int num_slots,
    int batch_size, int num_csr_buffers, int num_devices, bool distributed_slot, int pid,
    const std::shared_ptr<ResourceManager> resource_manager,
    std::vector<rmm::device_buffer> &csr_value_buffers,
    std::vector<rmm::device_buffer> &csr_row_offset_buffers, int64_t *dev_ptr_staging,
    uint32_t *dev_embed_param_offset_buf, unsigned int *dev_slot_offset_ptr,
    std::deque<rmm::device_buffer> &rmm_resources, rmm::mr::device_memory_resource *mr,
    cudaStream_t task_stream);
} // namespace HugeCTR
|
af7014f29c3e1dc1b9a666c67070d02a94067e03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define tam 1.0
#define dx 0.00001
#define dt 0.000001
#define T 0.01
#define kappa 0.000045
#define MAX_THREADS 512
// One FTCS (forward-time centered-space) step of the 1-D heat equation:
// u[i] = prev[i] + k*dt/dx^2 * (prev[i-1] - 2*prev[i] + prev[i+1]).
// Boundary points (i == 0 and i == n-1) are left untouched.
__global__ void run_ftcs(double *u, double *prev, double k, double t, double x, long int n)
{
    long int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i <= 0 || i >= (n - 1)) return;
    double laplacian = prev[i - 1] - 2 * prev[i] + prev[i + 1];
    u[i] = prev[i] + k * t / (x * x) * laplacian;
}
// Re-imposes the Dirichlet boundary condition u(0) = u(L) = 0.
// A single thread (global index 1, per the original contract) does both writes.
__global__ void borders(double *u, long int n)
{
    long int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i != 1) return;
    u[n] = 0.0;
    u[0] = 0.0;
}
/*
 * Drives an explicit FTCS solve of the 1-D heat equation on [0, tam]
 * with (n+1) grid points, then reports the location/value of the maximum.
 * Fixes over the original: C++-required malloc casts, byte (not element)
 * sizes for all device allocations/copies, filling u_prev BEFORE the H2D
 * copy, integer ceil-division for the grid size, launching with the
 * defined `blocks`/`MAX_THREADS` (the original used undefined names),
 * swapping the DEVICE buffers between steps (the original swapped host
 * pointers, so the kernels kept reading stale data), and freeing the
 * actual device pointers.
 */
int main(void)
{
	/********* HOST VARIABLES *********/
	double *u, *u_prev;
	double x = 0, t = 0;
	long int i, n, maxloc = 0, blocks;

	n = tam/dx;
	size_t nbytes = (n + 1) * sizeof(double);   /* (n+1) grid points, in bytes */
	u = (double *)malloc(nbytes);
	u_prev = (double *)malloc(nbytes);
	if (u == NULL || u_prev == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}
	/* ceil((n+1)/MAX_THREADS) with integer arithmetic; the original
	   applied ceil() AFTER the integer division had already truncated */
	blocks = (n + 1 + MAX_THREADS - 1) / MAX_THREADS;

	/********* FILLING U_PREV (must precede the H2D copy) *********/
	for (i = 0; i < n + 1; i++) {
		if (x <= 0.5) u_prev[i] = 200*x;
		else u_prev[i] = 200*(1.-x);
		x += dx;
	}

	/********* DEVICE VARIABLES *********/
	double *d_u, *d_u_prev, *d_tmp;
	hipMalloc((void **)&d_u, nbytes);
	hipMalloc((void **)&d_u_prev, nbytes);
	hipMemcpy(d_u_prev, u_prev, nbytes, hipMemcpyHostToDevice);

	while (t < T) {
		/* interior update, then boundary condition; kernels on the same
		   (default) stream execute in order, so no explicit sync needed */
		hipLaunchKernelGGL(run_ftcs, dim3(blocks), dim3(MAX_THREADS), 0, 0,
		                   d_u, d_u_prev, kappa, dt, dx, n+1);
		hipLaunchKernelGGL(borders, dim3(blocks), dim3(MAX_THREADS), 0, 0, d_u, n);
		/* swap the device buffers for the next step */
		d_tmp = d_u_prev;
		d_u_prev = d_u;
		d_u = d_tmp;
		t += dt;
	}
	/* after the final swap the newest time step lives in d_u_prev;
	   hipMemcpy is blocking, so no extra synchronization is needed */
	hipMemcpy(u, d_u_prev, nbytes, hipMemcpyDeviceToHost);

	/* Calculando o maior valor e sua localizacao */
	for (i = 1; i < n + 1; i++)
		if (u[i] > u[maxloc]) maxloc = i;
	printf("Maior valor u[%ld] = %g\n", maxloc, u[maxloc]);

	free(u);
	free(u_prev);
	hipFree(d_u);
	hipFree(d_u_prev);
	return 0;
}
| af7014f29c3e1dc1b9a666c67070d02a94067e03.cu | #include <stdio.h>
#include <stdlib.h>
#define tam 1.0
#define dx 0.00001
#define dt 0.000001
#define T 0.01
#define kappa 0.000045
#define MAX_THREADS 512
// One FTCS (forward-time centered-space) step of the 1-D heat equation.
// Interior points only: i == 0 and i == n-1 are left for the borders kernel.
__global__ void run_ftcs(double *u, double *prev, double k, double t, double x, long int n)
{
	long int i = blockDim.x * blockIdx.x + threadIdx.x;
	if(i>0 && i<(n-1))
		// u_new = u_old + k*dt/dx^2 * discrete Laplacian
		u[i] = prev[i] + k*t/(x*x)*(prev[i-1]-2*prev[i]+prev[i+1]);
}
// Re-imposes the Dirichlet boundary condition u(0) = u(L) = 0.
// Only the thread with global index 1 performs the two writes.
__global__ void borders(double *u, long int n)
{
	long int i = blockDim.x * blockIdx.x + threadIdx.x;
	if(i==1) u[0] = u[n] = 0.0;
}
/*
 * Drives an explicit FTCS solve of the 1-D heat equation on [0, tam]
 * with (n+1) grid points, then reports the location/value of the maximum.
 * Fixes over the original: C++-required malloc casts, byte (not element)
 * sizes for all device allocations/copies, filling u_prev BEFORE the H2D
 * copy, integer ceil-division for the grid size, launching with the
 * defined `blocks`/`MAX_THREADS` (the original used undefined names),
 * swapping the DEVICE buffers between steps (the original swapped host
 * pointers, so the kernels kept reading stale data), and freeing the
 * actual device pointers.
 */
int main(void)
{
	/********* HOST VARIABLES *********/
	double *u, *u_prev;
	double x = 0, t = 0;
	long int i, n, maxloc = 0, blocks;

	n = tam/dx;
	size_t nbytes = (n + 1) * sizeof(double);   /* (n+1) grid points, in bytes */
	u = (double *)malloc(nbytes);
	u_prev = (double *)malloc(nbytes);
	if (u == NULL || u_prev == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}
	/* ceil((n+1)/MAX_THREADS) with integer arithmetic; the original
	   applied ceil() AFTER the integer division had already truncated */
	blocks = (n + 1 + MAX_THREADS - 1) / MAX_THREADS;

	/********* FILLING U_PREV (must precede the H2D copy) *********/
	for (i = 0; i < n + 1; i++) {
		if (x <= 0.5) u_prev[i] = 200*x;
		else u_prev[i] = 200*(1.-x);
		x += dx;
	}

	/********* CUDA VARIABLES *********/
	double *d_u, *d_u_prev, *d_tmp;
	cudaMalloc((void **)&d_u, nbytes);
	cudaMalloc((void **)&d_u_prev, nbytes);
	cudaMemcpy(d_u_prev, u_prev, nbytes, cudaMemcpyHostToDevice);

	while (t < T) {
		/* interior update, then boundary condition; kernels on the same
		   (default) stream execute in order, so no explicit sync needed */
		run_ftcs<<<blocks, MAX_THREADS>>>(d_u, d_u_prev, kappa, dt, dx, n+1);
		borders<<<blocks, MAX_THREADS>>>(d_u, n);
		/* swap the device buffers for the next step */
		d_tmp = d_u_prev;
		d_u_prev = d_u;
		d_u = d_tmp;
		t += dt;
	}
	/* after the final swap the newest time step lives in d_u_prev;
	   cudaMemcpy is blocking, so no extra synchronization is needed */
	cudaMemcpy(u, d_u_prev, nbytes, cudaMemcpyDeviceToHost);

	/* Calculando o maior valor e sua localizacao */
	for (i = 1; i < n + 1; i++)
		if (u[i] > u[maxloc]) maxloc = i;
	printf("Maior valor u[%ld] = %g\n", maxloc, u[maxloc]);

	free(u);
	free(u_prev);
	cudaFree(d_u);
	cudaFree(d_u_prev);
	return 0;
}
|
e0ffbafba510664e3879c4845e0244f34f119512.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* pinnedBandwidth.cu
*
* Measure memory bandwidth between pinned host memory and
* device memory, both directions. Performs the measurements
* for all devices in the system.
*
* Copyright (c) 2015, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <chError.h>
#include <chCommandLine.h>
#include <chTimer.h>
/*
 * Measures sustained copy bandwidth (bytes/s) between pinned host memory
 * and device iDevice, in the direction given by the template parameter.
 * Returns 0.0 if any runtime call fails (CUDART_CHECK jumps to Error).
 *
 * Fix: the original always copied dst=pDevice, src=pHost regardless of
 * `type`, so the DeviceToHost measurement either failed with an invalid
 * direction or (under unified addressing, where the kind is inferred
 * from the pointers) silently re-measured host-to-device bandwidth.
 * The pointers are now chosen to match the requested direction.
 */
template< hipMemcpyKind type >
double
Bandwidth( int iDevice, int cIterations, size_t N )
{
    hipError_t status;
    double ret = 0.0;
    chTimerTimestamp start, stop;
    void *pHost = 0, *pDevice = 0;

    CUDART_CHECK( hipSetDevice( iDevice ) );
    CUDART_CHECK( hipMalloc( &pDevice, N ) );
    CUDART_CHECK( hipHostMalloc( &pHost, N ) );   // pinned host memory: required for async copies

    {
        // Orient the copy to match the template direction.
        void *dst = ( type == hipMemcpyDeviceToHost ) ? pHost : pDevice;
        void *src = ( type == hipMemcpyDeviceToHost ) ? pDevice : pHost;
        chTimerGetTime( &start );
        for ( int i = 0; i < cIterations; i++ ) {
            CUDART_CHECK( hipMemcpyAsync( dst, src, N, type, NULL ) );
        }
        CUDART_CHECK( hipDeviceSynchronize() );   // drain all queued copies before stopping the clock
        chTimerGetTime( &stop );
    }

    ret = chTimerBandwidth( &start, &stop, cIterations*N );
Error:
    hipFree( pDevice );
    hipHostFree( pHost );
    return ret;
}
// Entry point: prints a per-device table of host->device and
// device->host pinned-memory bandwidths (GB/s).
// Command-line overrides: --iterations N, --MB N.
int
main( int argc, char *argv[] )
{
    hipError_t status;
    int cIterations = 100;
    int cMB = 64;   // transfer size in MiB per iteration

    int deviceCount;
    CUDART_CHECK( hipGetDeviceCount( &deviceCount ) );

    chCommandLineGet( &cIterations, "iterations", argc, argv );
    chCommandLineGet( &cMB, "MB", argc, argv );

    printf( "Transferring %d MB %d times... (all bandwidths in GB/s)\n", cMB, cIterations );
    printf( "Device\tHtoD\tDtoH\n" );
    for ( int iDevice = 0; iDevice < deviceCount; iDevice++ ) {
        printf( "%d\t", iDevice );
        // size_t cast avoids 32-bit overflow for large --MB values
        printf( "%.2f\t", Bandwidth<hipMemcpyHostToDevice>( iDevice, cIterations, cMB*(size_t) 1048576 )/1e9 );
        printf( "%.2f\n", Bandwidth<hipMemcpyDeviceToHost>( iDevice, cIterations, cMB*(size_t) 1048576 )/1e9 );
    }

    return 0;
Error:
    return 1;
}
| e0ffbafba510664e3879c4845e0244f34f119512.cu | /*
*
* pinnedBandwidth.cu
*
* Measure memory bandwidth between pinned host memory and
* device memory, both directions. Performs the measurements
* for all devices in the system.
*
* Copyright (c) 2015, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <chError.h>
#include <chCommandLine.h>
#include <chTimer.h>
/*
 * Measures sustained copy bandwidth (bytes/s) between pinned host memory
 * and device iDevice, in the direction given by the template parameter.
 * Returns 0.0 if any CUDA call fails (CUDART_CHECK jumps to Error).
 *
 * Fix: the original always copied dst=pDevice, src=pHost regardless of
 * `type`, so the DeviceToHost measurement either failed with an invalid
 * direction or (under unified addressing, where the kind is inferred
 * from the pointers) silently re-measured host-to-device bandwidth.
 * The pointers are now chosen to match the requested direction.
 */
template< cudaMemcpyKind type >
double
Bandwidth( int iDevice, int cIterations, size_t N )
{
    cudaError_t status;
    double ret = 0.0;
    chTimerTimestamp start, stop;
    void *pHost = 0, *pDevice = 0;

    CUDART_CHECK( cudaSetDevice( iDevice ) );
    CUDART_CHECK( cudaMalloc( &pDevice, N ) );
    CUDART_CHECK( cudaMallocHost( &pHost, N ) );  // pinned host memory: required for async copies

    {
        // Orient the copy to match the template direction.
        void *dst = ( type == cudaMemcpyDeviceToHost ) ? pHost : pDevice;
        void *src = ( type == cudaMemcpyDeviceToHost ) ? pDevice : pHost;
        chTimerGetTime( &start );
        for ( int i = 0; i < cIterations; i++ ) {
            CUDART_CHECK( cudaMemcpyAsync( dst, src, N, type, NULL ) );
        }
        CUDART_CHECK( cudaDeviceSynchronize() );  // drain all queued copies before stopping the clock
        chTimerGetTime( &stop );
    }

    ret = chTimerBandwidth( &start, &stop, cIterations*N );
Error:
    cudaFree( pDevice );
    cudaFreeHost( pHost );
    return ret;
}
// Entry point: prints a per-device table of host->device and
// device->host pinned-memory bandwidths (GB/s).
// Command-line overrides: --iterations N, --MB N.
int
main( int argc, char *argv[] )
{
    cudaError_t status;
    int cIterations = 100;
    int cMB = 64;   // transfer size in MiB per iteration

    int deviceCount;
    CUDART_CHECK( cudaGetDeviceCount( &deviceCount ) );

    chCommandLineGet( &cIterations, "iterations", argc, argv );
    chCommandLineGet( &cMB, "MB", argc, argv );

    printf( "Transferring %d MB %d times... (all bandwidths in GB/s)\n", cMB, cIterations );
    printf( "Device\tHtoD\tDtoH\n" );
    for ( int iDevice = 0; iDevice < deviceCount; iDevice++ ) {
        printf( "%d\t", iDevice );
        // size_t cast avoids 32-bit overflow for large --MB values
        printf( "%.2f\t", Bandwidth<cudaMemcpyHostToDevice>( iDevice, cIterations, cMB*(size_t) 1048576 )/1e9 );
        printf( "%.2f\n", Bandwidth<cudaMemcpyDeviceToHost>( iDevice, cIterations, cMB*(size_t) 1048576 )/1e9 );
    }

    return 0;
Error:
    return 1;
}
|
916ae206a0a53f9cbc46087381255795c8e04c2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.h"
#include "THHApply.cuh"
// Elementwise logistic sigmoid: output = 1 / (1 + exp(-input)).
// Fix: use float literals and expf() — the original 1./(1.+exp(...))
// promoted every element to double, which is a large slowdown on GPUs
// with weak FP64 throughput (results differ only in low-order bits).
struct sigmoidupdateOutput_functor
{
  __device__ void operator()(float* output, const float* input) const
  {
    *output = 1.f/(1.f + expf(-*input));
  }
};
// Lua binding: self.output = sigmoid(input), elementwise on the GPU.
// Stack: [1]=module (self), [2]=input CudaTensor. Returns 1 (self.output
// is mutated in place). Also prints the elapsed pointwiseApply2 time.
static int cunn_Sigmoid_updateOutput(lua_State *L)
{
  //double ts = get_ts();
  THCState *state = getCutorchState(L);
  THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
  THAssert(THCudaTensor_checkGPU(state, 2, input, output));
  THCudaTensor_resizeAs(state, output, input);
  //double pre_tensor = get_ts() - ts;
  double ts = get_ts();
  THCudaTensor_pointwiseApply2(state, output, input, sigmoidupdateOutput_functor());
  //double tensor = get_ts() - ts;
  //double post_tensor = 0.0;
  // NOTE(review): timing measures only the kernel *launch* unless
  // pointwiseApply2 synchronizes internally — confirm before trusting it.
  double pointwiseApply2_sigmoid = get_ts() - ts;
  //std::cout<<"Sigmoid__pre_tensor|"<<pre_tensor<<std::endl;
  //std::cout<<"Sigmoid__tensor|"<<tensor<<std::endl;
  //std::cout<<"Sigmoid__post_tensor|"<<post_tensor<<std::endl;
  std::cout<<std::fixed<<"pointwiseApply2_sigmoid,"<<pointwiseApply2_sigmoid<<std::endl;
  return 1;
}
// Elementwise sigmoid backward: gradInput = gradOutput * (1 - out) * out.
// Fix: float literal 1.f — the original (1.-*output) promoted the whole
// expression to double (results differ only in low-order bits).
struct sigmoidupdateGradInput_functor
{
  __device__ void operator()(float* gradInput, const float* output, const float* gradOutput) const
  {
    *gradInput = *gradOutput * (1.f - *output) * *output;
  }
};
// Lua binding: self.gradInput = gradOutput * (1 - output) * output.
// Stack: [1]=module (self), [3]=gradOutput CudaTensor. Uses self.output
// from the forward pass. Returns 1 (self.gradInput mutated in place).
static int cunn_Sigmoid_updateGradInput(lua_State *L)
{
  THCState *state = getCutorchState(L);
  THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
  THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
  THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
  THAssert(THCudaTensor_checkGPU(state, 3, output, gradOutput, gradInput));
  THCudaTensor_resizeAs(state, gradInput, output);
  THCudaTensor_pointwiseApply3(state, gradInput, output, gradOutput, sigmoidupdateGradInput_functor());
  return 1;
}
// Lua registration table mapping method names to the C bindings above.
static const struct luaL_Reg cunn_Sigmoid__ [] = {
  {"Sigmoid_updateOutput", cunn_Sigmoid_updateOutput},
  {"Sigmoid_updateGradInput", cunn_Sigmoid_updateGradInput},
  {NULL, NULL}
};
// Registers the Sigmoid bindings on torch.CudaTensor under the "nn" field.
void cunn_Sigmoid_init(lua_State *L)
{
  luaT_pushmetatable(L, "torch.CudaTensor");
  luaT_registeratname(L, cunn_Sigmoid__, "nn");
  lua_pop(L,1);  // pop the metatable pushed above
}
| 916ae206a0a53f9cbc46087381255795c8e04c2e.cu | #include "utils.h"
#include "THCApply.cuh"
// Pointwise Sigmoid forward functor, applied elementwise via
// THCudaTensor_pointwiseApply2: y = 1 / (1 + e^-x).
struct sigmoidupdateOutput_functor
{
__device__ void operator()(float* output, const float* input) const
{
// expf and float literals keep the computation in single precision;
// the original "1./(1.+ exp(-*input))" evaluated the sigmoid in
// double and then truncated the result back to float.
*output = 1.0f/(1.0f + expf(-*input));
}
};
// Lua binding for nn.Sigmoid:updateOutput on CUDA tensors.
// Lua stack: index 1 = the Sigmoid module (supplies the "output" field),
// index 2 = input. Resizes output to match input, then applies the
// sigmoid functor elementwise. The get_ts()/cout lines are timing
// instrumentation: every call prints the pointwiseApply2 duration to
// stdout (the commented-out lines are earlier variants of the same
// instrumentation).
static int cunn_Sigmoid_updateOutput(lua_State *L)
{
//double ts = get_ts();
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
THCudaTensor_resizeAs(state, output, input);
//double pre_tensor = get_ts() - ts;
double ts = get_ts();
THCudaTensor_pointwiseApply2(state, output, input, sigmoidupdateOutput_functor());
//double tensor = get_ts() - ts;
//double post_tensor = 0.0;
double pointwiseApply2_sigmoid = get_ts() - ts;
//std::cout<<"Sigmoid__pre_tensor|"<<pre_tensor<<std::endl;
//std::cout<<"Sigmoid__tensor|"<<tensor<<std::endl;
//std::cout<<"Sigmoid__post_tensor|"<<post_tensor<<std::endl;
std::cout<<std::fixed<<"pointwiseApply2_sigmoid,"<<pointwiseApply2_sigmoid<<std::endl;
return 1;
}
// Pointwise Sigmoid backward functor, applied elementwise via
// THCudaTensor_pointwiseApply3: dL/dx = dL/dy * (1 - y) * y, where
// y = sigmoid(x) is the saved forward output.
struct sigmoidupdateGradInput_functor
{
__device__ void operator()(float* gradInput, const float* output, const float* gradOutput) const
{
// Use a float literal so the product stays in single precision
// (the original "1." promoted the whole expression to double).
*gradInput = *gradOutput * (1.0f - *output) * *output;
}
};
// Lua binding for nn.Sigmoid:updateGradInput on CUDA tensors.
// Lua stack: index 1 = the Sigmoid module (supplies the "output" and
// "gradInput" fields), index 3 = gradOutput. Computes
// gradInput = gradOutput * (1 - y) * y elementwise on the GPU and
// returns 1 value to Lua.
static int cunn_Sigmoid_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
// All three tensors must be resident on the GPU(s).
THAssert(THCudaTensor_checkGPU(state, 3, output, gradOutput, gradInput));
THCudaTensor_resizeAs(state, gradInput, output);
THCudaTensor_pointwiseApply3(state, gradInput, output, gradOutput, sigmoidupdateGradInput_functor());
return 1;
}
// Lua method table: forward/backward entry points registered on
// torch.CudaTensor (under the "nn" field) by cunn_Sigmoid_init below.
static const struct luaL_Reg cunn_Sigmoid__ [] = {
{"Sigmoid_updateOutput", cunn_Sigmoid_updateOutput},
{"Sigmoid_updateGradInput", cunn_Sigmoid_updateGradInput},
{NULL, NULL}
};
// Registers the Sigmoid bindings in the torch.CudaTensor metatable
// (under "nn") and leaves the Lua stack balanced.
void cunn_Sigmoid_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_Sigmoid__, "nn");
lua_pop(L,1);
}
|
e013ec6922f83f6200fff944df31939faec18509.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define WINSIZE 1
const int nebsize=(WINSIZE*2+1)*(WINSIZE*2+1);
// One thread per pixel of a w x h image stored column-major
// (ind = col*h + row); img is interleaved 3-channel data (3 doubles per
// pixel). For each *interior* pixel (full (2*WINSIZE+1)^2 window fits),
// the thread builds a nebsize x nebsize block of sparse-matrix entries
// in row_inds/col_inds/vals. The math — window mean, epsilon-regularized
// 3x3 color covariance, its inverse, then
// (i==j) - (1 + (Ii-mu)^T S^-1 (Ij-mu)) / nebsize —
// appears to be the closed-form matting Laplacian (Levin et al.);
// TODO(review): confirm against the host-side caller.
__global__ void loop(double *img, int *row_inds, int *col_inds, double *vals, int w, int h, double epsilon){
int ind=blockIdx.x*blockDim.x+threadIdx.x;
// Border pixels are skipped: only pixels whose whole window is in-bounds
// produce output, so vals has (w-2*WINSIZE)*(h-2*WINSIZE) blocks.
if(ind<w*h && ind%h>=WINSIZE && ind%h<h-WINSIZE && ind/h>=WINSIZE && ind/h<w-WINSIZE){
int win_inds[nebsize];
double tvals[nebsize*nebsize];
int k;
k=0;
// Linear pixel indices of the window, column-major like 'ind'.
for(int i=-WINSIZE; i<=WINSIZE; i++){
for(int j=-WINSIZE; j<=WINSIZE; j++){
win_inds[k]=ind+i*h+j;
k++;
}
}
double winI[3*nebsize];
double winI_aux[3*nebsize];
double pre_win_var[9];
double win_var[9];
double win_mu[3];
double detwin;
k=0;
// Copy each column of the window (3 channels x window height) into
// thread-local storage. memcpy is legal in device code here.
for(int i=-WINSIZE; i<=WINSIZE; i++){
memcpy(&winI[3*k*(WINSIZE*2+1)],&img[3*(ind+i*h-WINSIZE)], 3*(WINSIZE*2+1)*sizeof(double));
k++;
}
// Per-channel window mean.
win_mu[0]=0;
win_mu[1]=0;
win_mu[2]=0;
for(int i=0; i<nebsize; i++){
win_mu[0]+=winI[3*i];
win_mu[1]+=winI[3*i+1];
win_mu[2]+=winI[3*i+2];
}
win_mu[0]=win_mu[0]/nebsize;
win_mu[1]=win_mu[1]/nebsize;
win_mu[2]=win_mu[2]/nebsize;
// 3x3 color covariance of the window, regularized with epsilon/nebsize
// on the diagonal: pre_win_var = E[I I^T] - mu mu^T + (eps/n) Id.
for(int i=0; i<3; i++){
for(int j=0; j<3; j++){
pre_win_var[3*i+j]=0;
for(int n=0; n<nebsize; n++){
pre_win_var[3*i+j]+=winI[3*n+i]*winI[3*n+j];
}
pre_win_var[3*i+j]=pre_win_var[3*i+j]/nebsize;
pre_win_var[3*i+j]+=(i==j)*epsilon/nebsize-win_mu[j]*win_mu[i];
}
}
// inverse: 3x3 inversion via the adjugate divided by the determinant.
detwin=pre_win_var[0]*pre_win_var[4]*pre_win_var[8]+pre_win_var[2]*pre_win_var[3]*pre_win_var[7]+pre_win_var[1]*pre_win_var[5]*pre_win_var[6];
detwin-=pre_win_var[6]*pre_win_var[4]*pre_win_var[2]+pre_win_var[3]*pre_win_var[1]*pre_win_var[8]+pre_win_var[7]*pre_win_var[5]*pre_win_var[0];
win_var[0]=(pre_win_var[4]*pre_win_var[8]-pre_win_var[5]*pre_win_var[7])/detwin;
win_var[3]=-(pre_win_var[3]*pre_win_var[8]-pre_win_var[5]*pre_win_var[6])/detwin;
win_var[6]=(pre_win_var[3]*pre_win_var[7]-pre_win_var[4]*pre_win_var[6])/detwin;
win_var[1]=-(pre_win_var[1]*pre_win_var[8]-pre_win_var[2]*pre_win_var[7])/detwin;
win_var[4]=(pre_win_var[0]*pre_win_var[8]-pre_win_var[2]*pre_win_var[6])/detwin;
win_var[7]=-(pre_win_var[0]*pre_win_var[7]-pre_win_var[1]*pre_win_var[6])/detwin;
win_var[2]=(pre_win_var[1]*pre_win_var[5]-pre_win_var[2]*pre_win_var[4])/detwin;
win_var[5]=-(pre_win_var[0]*pre_win_var[5]-pre_win_var[2]*pre_win_var[3])/detwin;
win_var[8]=(pre_win_var[0]*pre_win_var[4]-pre_win_var[1]*pre_win_var[3])/detwin;
// end of inverse
// Center the window colors: winI <- I - mu.
for(int i=0; i<nebsize; i++){
winI[3*i+0]-=win_mu[0];
winI[3*i+1]-=win_mu[1];
winI[3*i+2]-=win_mu[2];
}
// winI_aux = (I - mu) * S^-1 (nebsize x 3 times 3 x 3).
for(int i=0; i<nebsize; i++){
for(int j=0; j<3; j++){
winI_aux[3*i+j]=0;
for(int n=0; n<3; n++){
winI_aux[3*i+j]+=winI[3*i+n]*win_var[3*n+j];
}
}
}
// tvals[i][j] = (i==j) - (1 + (Ii-mu)^T S^-1 (Ij-mu)) / nebsize.
for(int i=0; i<nebsize; i++){
for(int j=0; j<nebsize; j++){
tvals[i*nebsize+j]=0;
for(int n=0; n<3; n++){
tvals[i*nebsize+j]+=winI_aux[3*i+n]*winI[3*j+n];
}
tvals[i*nebsize+j]++;
tvals[i*nebsize+j]=(i==j)-tvals[i*nebsize+j]/nebsize;
}
}
k=0;
// Each interior pixel owns one dense nebsize^2 block of the output,
// addressed in row-major order over the interior region.
int base=((ind/h-WINSIZE)*(h-2*WINSIZE)+(ind%h-WINSIZE))*nebsize*nebsize;
for(int i=0; i<nebsize; i++){
for(int j=0; j<nebsize; j++){
row_inds[base+k]=win_inds[j];
col_inds[base+k]=win_inds[i];
k++;
}
}
memcpy(&vals[base],tvals, nebsize*nebsize*sizeof(double));
}
}
| e013ec6922f83f6200fff944df31939faec18509.cu | #define WINSIZE 1
const int nebsize=(WINSIZE*2+1)*(WINSIZE*2+1);
// One thread per pixel of a w x h image stored column-major
// (ind = col*h + row); img is interleaved 3-channel data (3 doubles per
// pixel). For each *interior* pixel (full (2*WINSIZE+1)^2 window fits),
// the thread builds a nebsize x nebsize block of sparse-matrix entries
// in row_inds/col_inds/vals. The math — window mean, epsilon-regularized
// 3x3 color covariance, its inverse, then
// (i==j) - (1 + (Ii-mu)^T S^-1 (Ij-mu)) / nebsize —
// appears to be the closed-form matting Laplacian (Levin et al.);
// TODO(review): confirm against the host-side caller.
__global__ void loop(double *img, int *row_inds, int *col_inds, double *vals, int w, int h, double epsilon){
int ind=blockIdx.x*blockDim.x+threadIdx.x;
// Border pixels are skipped: only pixels whose whole window is in-bounds
// produce output, so vals has (w-2*WINSIZE)*(h-2*WINSIZE) blocks.
if(ind<w*h && ind%h>=WINSIZE && ind%h<h-WINSIZE && ind/h>=WINSIZE && ind/h<w-WINSIZE){
int win_inds[nebsize];
double tvals[nebsize*nebsize];
int k;
k=0;
// Linear pixel indices of the window, column-major like 'ind'.
for(int i=-WINSIZE; i<=WINSIZE; i++){
for(int j=-WINSIZE; j<=WINSIZE; j++){
win_inds[k]=ind+i*h+j;
k++;
}
}
double winI[3*nebsize];
double winI_aux[3*nebsize];
double pre_win_var[9];
double win_var[9];
double win_mu[3];
double detwin;
k=0;
// Copy each column of the window (3 channels x window height) into
// thread-local storage. memcpy is legal in device code here.
for(int i=-WINSIZE; i<=WINSIZE; i++){
memcpy(&winI[3*k*(WINSIZE*2+1)],&img[3*(ind+i*h-WINSIZE)], 3*(WINSIZE*2+1)*sizeof(double));
k++;
}
// Per-channel window mean.
win_mu[0]=0;
win_mu[1]=0;
win_mu[2]=0;
for(int i=0; i<nebsize; i++){
win_mu[0]+=winI[3*i];
win_mu[1]+=winI[3*i+1];
win_mu[2]+=winI[3*i+2];
}
win_mu[0]=win_mu[0]/nebsize;
win_mu[1]=win_mu[1]/nebsize;
win_mu[2]=win_mu[2]/nebsize;
// 3x3 color covariance of the window, regularized with epsilon/nebsize
// on the diagonal: pre_win_var = E[I I^T] - mu mu^T + (eps/n) Id.
for(int i=0; i<3; i++){
for(int j=0; j<3; j++){
pre_win_var[3*i+j]=0;
for(int n=0; n<nebsize; n++){
pre_win_var[3*i+j]+=winI[3*n+i]*winI[3*n+j];
}
pre_win_var[3*i+j]=pre_win_var[3*i+j]/nebsize;
pre_win_var[3*i+j]+=(i==j)*epsilon/nebsize-win_mu[j]*win_mu[i];
}
}
// inverse: 3x3 inversion via the adjugate divided by the determinant.
detwin=pre_win_var[0]*pre_win_var[4]*pre_win_var[8]+pre_win_var[2]*pre_win_var[3]*pre_win_var[7]+pre_win_var[1]*pre_win_var[5]*pre_win_var[6];
detwin-=pre_win_var[6]*pre_win_var[4]*pre_win_var[2]+pre_win_var[3]*pre_win_var[1]*pre_win_var[8]+pre_win_var[7]*pre_win_var[5]*pre_win_var[0];
win_var[0]=(pre_win_var[4]*pre_win_var[8]-pre_win_var[5]*pre_win_var[7])/detwin;
win_var[3]=-(pre_win_var[3]*pre_win_var[8]-pre_win_var[5]*pre_win_var[6])/detwin;
win_var[6]=(pre_win_var[3]*pre_win_var[7]-pre_win_var[4]*pre_win_var[6])/detwin;
win_var[1]=-(pre_win_var[1]*pre_win_var[8]-pre_win_var[2]*pre_win_var[7])/detwin;
win_var[4]=(pre_win_var[0]*pre_win_var[8]-pre_win_var[2]*pre_win_var[6])/detwin;
win_var[7]=-(pre_win_var[0]*pre_win_var[7]-pre_win_var[1]*pre_win_var[6])/detwin;
win_var[2]=(pre_win_var[1]*pre_win_var[5]-pre_win_var[2]*pre_win_var[4])/detwin;
win_var[5]=-(pre_win_var[0]*pre_win_var[5]-pre_win_var[2]*pre_win_var[3])/detwin;
win_var[8]=(pre_win_var[0]*pre_win_var[4]-pre_win_var[1]*pre_win_var[3])/detwin;
// end of inverse
// Center the window colors: winI <- I - mu.
for(int i=0; i<nebsize; i++){
winI[3*i+0]-=win_mu[0];
winI[3*i+1]-=win_mu[1];
winI[3*i+2]-=win_mu[2];
}
// winI_aux = (I - mu) * S^-1 (nebsize x 3 times 3 x 3).
for(int i=0; i<nebsize; i++){
for(int j=0; j<3; j++){
winI_aux[3*i+j]=0;
for(int n=0; n<3; n++){
winI_aux[3*i+j]+=winI[3*i+n]*win_var[3*n+j];
}
}
}
// tvals[i][j] = (i==j) - (1 + (Ii-mu)^T S^-1 (Ij-mu)) / nebsize.
for(int i=0; i<nebsize; i++){
for(int j=0; j<nebsize; j++){
tvals[i*nebsize+j]=0;
for(int n=0; n<3; n++){
tvals[i*nebsize+j]+=winI_aux[3*i+n]*winI[3*j+n];
}
tvals[i*nebsize+j]++;
tvals[i*nebsize+j]=(i==j)-tvals[i*nebsize+j]/nebsize;
}
}
k=0;
// Each interior pixel owns one dense nebsize^2 block of the output,
// addressed in row-major order over the interior region.
int base=((ind/h-WINSIZE)*(h-2*WINSIZE)+(ind%h-WINSIZE))*nebsize*nebsize;
for(int i=0; i<nebsize; i++){
for(int j=0; j<nebsize; j++){
row_inds[base+k]=win_inds[j];
col_inds[base+k]=win_inds[i];
k++;
}
}
memcpy(&vals[base],tvals, nebsize*nebsize*sizeof(double));
}
}
|
ca66e87b9e6dea67124f39ce20e5d2dda9147c23.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "decryptKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps argv[1] matrix sizes (rows of matrices_)
// against all 20 launch configurations in blocks_, timing 1000 launches
// of decryptKernel per configuration and printing CSV-ish results.
// NOTE(review): argv[1] is read without checking argc — running with no
// arguments is undefined behavior.
// NOTE(review): deviceDataIn/deviceDataOut are allocated every iteration
// but never hipFree'd, so the sweep leaks device memory.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *deviceDataIn = NULL;
hipMalloc(&deviceDataIn, XSIZE*YSIZE);
char *deviceDataOut = NULL;
hipMalloc(&deviceDataOut, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
// Round the dimensions up to multiples of the block shape so the
// grid covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) is a no-op commonly used to force context creation
// before timing; one launch + sync below warms the device.
hipFree(0);hipLaunchKernelGGL((
decryptKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, deviceDataIn,deviceDataOut,n);
hipDeviceSynchronize();
// 10 untimed warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
decryptKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, deviceDataIn,deviceDataOut,n);
}
auto start = steady_clock::now();
// NOTE(review): kernel launches are asynchronous and there is no
// synchronize before 'end' — this measures 1000 launch/enqueue
// overheads, not kernel execution time.
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
decryptKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, deviceDataIn,deviceDataOut,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ca66e87b9e6dea67124f39ce20e5d2dda9147c23.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "decryptKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps argv[1] matrix sizes (rows of matrices_)
// against all 20 launch configurations in blocks_, timing 1000 launches
// of decryptKernel per configuration and printing CSV-ish results.
// NOTE(review): argv[1] is read without checking argc — running with no
// arguments is undefined behavior.
// NOTE(review): deviceDataIn/deviceDataOut are allocated every iteration
// but never cudaFree'd, so the sweep leaks device memory.
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *deviceDataIn = NULL;
cudaMalloc(&deviceDataIn, XSIZE*YSIZE);
char *deviceDataOut = NULL;
cudaMalloc(&deviceDataOut, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
// Round the dimensions up to multiples of the block shape so the
// grid covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) is a no-op commonly used to force context creation
// before timing; one launch + sync below warms the device.
cudaFree(0);
decryptKernel<<<gridBlock,threadBlock>>>(deviceDataIn,deviceDataOut,n);
cudaDeviceSynchronize();
// 10 untimed warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
decryptKernel<<<gridBlock,threadBlock>>>(deviceDataIn,deviceDataOut,n);
}
auto start = steady_clock::now();
// NOTE(review): kernel launches are asynchronous and there is no
// synchronize before 'end' — this measures 1000 launch/enqueue
// overheads, not kernel execution time.
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
decryptKernel<<<gridBlock,threadBlock>>>(deviceDataIn,deviceDataOut,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
41fd9ce78b00007d481eea9d1880ae08a3ecb0c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication and is exactly the same as
* Chapter 7 of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* CUBLAS provides high-performance matrix multiplication.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil_inline.h>
//#include <helper_functions.h>
// includes, kernels
#include <matrixMul_gold.h>
#include <matrixMul_kernel.cuh>
#include <matrixMul_naive.cuh>
#include <matrixMul_tiling.cuh>
#include <matrixMul_coalescing.cuh>
#include <matrixMul_noBankConflict.cuh>
#include <matrixMul_compOpt.cuh>
#include <matrixMul_unroll.cuh>
#include <matrixMul_prefetch.cuh>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
// Returns the offset of the first character of 'string' that is not
// 'delimiter' (i.e. how many leading '-' to skip for a flag). Quirk kept
// from the original CUDA sample: if skipping the delimiters leaves fewer
// than two characters, the offset reported is 0.
inline int stringRemoveDelimiter(char delimiter, const char *string)
{
    int skipped = 0;
    while (string[skipped] == delimiter)
    {
        ++skipped;
    }
    return (skipped >= (int)strlen(string) - 1) ? 0 : skipped;
}
// True if any argv[1..argc-1], with leading '-' stripped, matches
// string_ref case-insensitively up to an optional '=' suffix
// (e.g. "--device=1" matches "device").
inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref)
{
    const int ref_len = (int)strlen(string_ref);
    for (int i = 1; i < argc; ++i)
    {
        // Strip leading delimiters, then compare only the name part.
        const char *arg = &argv[i][stringRemoveDelimiter('-', argv[i])];
        const char *eq = strchr(arg, '=');
        const int arg_len = (int)(eq == 0 ? strlen(arg) : eq - arg);
        if (arg_len == ref_len && !STRNCASECMP(arg, string_ref, ref_len))
        {
            return true;
        }
    }
    return false;
}
// Parses "-name=value" (or "-namevalue") from argv and returns the
// integer value, or 0 when the flag is absent or carries no number.
// Matches the original sample's behavior: the comparison is a
// case-insensitive *prefix* match, and when several arguments match,
// the last one wins.
inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref)
{
    const int ref_len = (int)strlen(string_ref);
    bool found = false;
    int value = -1;
    for (int i = 1; i < argc; ++i)
    {
        const char *arg = &argv[i][stringRemoveDelimiter('-', argv[i])];
        if (STRNCASECMP(arg, string_ref, ref_len) != 0)
        {
            continue;
        }
        found = true;
        if (ref_len + 1 <= (int)strlen(arg))
        {
            // Skip an optional '=' between the name and the number.
            const int skip = (arg[ref_len] == '=') ? 1 : 0;
            value = atoi(&arg[ref_len + skip]);
        }
        else
        {
            value = 0;
        }
    }
    return found ? value : 0;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
// All work (device selection, allocation, kernels, timing) happens
// in runTest; it never returns control flow that reaches past exit().
runTest(argc, argv);
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Drives the whole benchmark: parses -device/-help flags, queries the
// device, fills WAxHA and WBxHB matrices with random data, then times
// eight matrix-multiply variants (SDK sample, naive, tiling, global-mem
// coalescing, no-bank-conflict, computation-optimized, loop-unrolled,
// prefetching) and, when CHECK_RESULT == 1, compares each against a CPU
// golden reference via printDiff.
// NOTE(review): hipEventCreate is called repeatedly on the same
// start/stop handles without ever calling hipEventDestroy, and the
// return codes of the mallocs, memcpys and kernel launches are not
// checked.
void
runTest(int argc, char** argv)
{
/****************************************************/
/* Preparations */
/****************************************************/
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
hipSetDevice(devID);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// utilities
hipEvent_t start;
hipEvent_t stop;
float msecTotal;
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
// 2*WC*HC*WA = one multiply + one add per inner-product term.
float flop = 2 * (float)WC * (float)HC * (float)WA;
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A;
hipMalloc((void**) &d_A, mem_size_A);
float* d_B;
hipMalloc((void**) &d_B, mem_size_B);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
hipMalloc((void**) &d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
#if CHECK_RESULT == 1
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// compute reference solution
float* reference = (float*) malloc(mem_size_C);
computeGold(reference, h_A, h_B, HA, WA, WB);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Naive CPU (Golden Reference)\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#endif
dim3 threads,grid;
/****************************************************/
/* CUDA SDK example */
/****************************************************/
// Each of the sections below follows the same pattern: start event,
// H2D copies, launch one kernel variant, D2H copy, stop event, report.
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// execute the kernel
hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("GPU SDK Sample\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* naive implementation on GPU */
/****************************************************/
#if ENABLE_NAIVE == 1
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// naive implementation
hipLaunchKernelGGL(( matrixMul_naive), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Naive GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
#endif
/****************************************************/
/* Tiling without global mem coalescing */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// naive implementation
hipLaunchKernelGGL(( matrixMul_tiling), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Tiling GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Global mem coalescing with smem bank conflict */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// naive implementation
hipLaunchKernelGGL(( matrixMul_coalescing), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Global mem coalescing GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Global mem coalescing w/o smem bank conflict */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// naive implementation
hipLaunchKernelGGL(( matrixMul_noBankConflict), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Remove shared mem bank conflict GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Threads perform computation optimizatin */
/****************************************************/
// From here on each thread computes multiple outputs, so the block
// becomes BLOCK_SIZE x 4 and the grid shrinks by 4 in x.
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// naive implementation
hipLaunchKernelGGL(( matrixMul_compOpt), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Threads perform computation optimization GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Loop Unrolling */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// naive implementation
hipLaunchKernelGGL(( matrixMul_unroll), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Loop unrolling GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Prefetching */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// naive implementation
hipLaunchKernelGGL(( matrixMul_prefetch), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Prefetching GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Cleaning */
/****************************************************/
// clean up memory
free(h_A);
free(h_B);
free(h_C);
#if CHECK_RESULT == 1
free(reference);
#endif
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// Tears down the device context; fine here because the process exits.
hipDeviceReset();
}
// Allocates a matrix with random float entries.
// Fill 'data' with 'size' pseudo-random floats in [0, 1], drawn from the
// C library rand() (so the sequence is reproducible after srand()).
void randomInit(float* data, int size)
{
    for (float* p = data; p != data + size; ++p)
    {
        *p = rand() / (float)RAND_MAX;
    }
}
// Compare two width x height row-major matrices elementwise, printing
// every pair that differs by more than 0.1 and a final mismatch count.
void printDiff(float *data1, float *data2, int width, int height)
{
    int error_count = 0;
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            const int idx = row * width + col;
            if (fabs(data1[idx] - data2[idx]) > 0.1 ) {
                printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f \n", col, row, data1[idx], data2[idx]);
                error_count++;
            }
        }
    }
    printf("Total Errors = %d \n", error_count);
}
| 41fd9ce78b00007d481eea9d1880ae08a3ecb0c3.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication and is exactly the same as
* Chapter 7 of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* CUBLAS provides high-performance matrix multiplication.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil_inline.h>
//#include <helper_functions.h>
// includes, kernels
#include <matrixMul_gold.h>
#include <matrixMul_kernel.cuh>
#include <matrixMul_naive.cuh>
#include <matrixMul_tiling.cuh>
#include <matrixMul_coalescing.cuh>
#include <matrixMul_noBankConflict.cuh>
#include <matrixMul_compOpt.cuh>
#include <matrixMul_unroll.cuh>
#include <matrixMul_prefetch.cuh>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
// Returns the offset of the first character of 'string' that is not
// 'delimiter' (i.e. how many leading '-' to skip for a flag). Quirk kept
// from the original CUDA sample: if skipping the delimiters leaves fewer
// than two characters, the offset reported is 0.
inline int stringRemoveDelimiter(char delimiter, const char *string)
{
    int skipped = 0;
    while (string[skipped] == delimiter)
    {
        ++skipped;
    }
    return (skipped >= (int)strlen(string) - 1) ? 0 : skipped;
}
// True if any argv[1..argc-1], with leading '-' stripped, matches
// string_ref case-insensitively up to an optional '=' suffix
// (e.g. "--device=1" matches "device").
inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref)
{
    const int ref_len = (int)strlen(string_ref);
    for (int i = 1; i < argc; ++i)
    {
        // Strip leading delimiters, then compare only the name part.
        const char *arg = &argv[i][stringRemoveDelimiter('-', argv[i])];
        const char *eq = strchr(arg, '=');
        const int arg_len = (int)(eq == 0 ? strlen(arg) : eq - arg);
        if (arg_len == ref_len && !STRNCASECMP(arg, string_ref, ref_len))
        {
            return true;
        }
    }
    return false;
}
// Parses "-name=value" (or "-namevalue") from argv and returns the
// integer value, or 0 when the flag is absent or carries no number.
// Matches the original sample's behavior: the comparison is a
// case-insensitive *prefix* match, and when several arguments match,
// the last one wins.
inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref)
{
    const int ref_len = (int)strlen(string_ref);
    bool found = false;
    int value = -1;
    for (int i = 1; i < argc; ++i)
    {
        const char *arg = &argv[i][stringRemoveDelimiter('-', argv[i])];
        if (STRNCASECMP(arg, string_ref, ref_len) != 0)
        {
            continue;
        }
        found = true;
        if (ref_len + 1 <= (int)strlen(arg))
        {
            // Skip an optional '=' between the name and the number.
            const int skip = (arg[ref_len] == '=') ? 1 : 0;
            value = atoi(&arg[ref_len + skip]);
        }
        else
        {
            value = 0;
        }
    }
    return found ? value : 0;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: delegates all work to runTest() (which itself exits early on
// --help/-? or a prohibited device) and terminates with EXIT_SUCCESS.
int
main(int argc, char** argv)
{
    runTest(argc, argv);
    exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Benchmarks a sequence of matrix-multiplication kernel variants (SDK-style,
// naive, tiled, coalesced, bank-conflict-free, compute-optimized, unrolled,
// prefetching) against the same random WA x HA / WB x HB inputs, printing
// elapsed time and GFLOPS for each. When CHECK_RESULT == 1 a CPU reference
// (computeGold) is computed and every GPU result is diffed against it.
//
// NOTE(review): each section's timing spans the host->device copies as well
// as the kernel, so the reported GFLOPS include transfer overhead — confirm
// that is intended.
// NOTE(review): cudaEventCreate is called repeatedly for `start`/`stop` but
// cudaEventDestroy is never called — the events leak; consider creating once
// and destroying at the end.
void
runTest(int argc, char** argv)
{
    /****************************************************/
    /* Preparations */
    /****************************************************/
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    // --help / -? prints usage and exits.
    if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
        checkCmdLineFlag(argc, (const char **)argv, "?"))
    {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
        exit(EXIT_SUCCESS);
    }
    // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
    int devID = 0;
    if (checkCmdLineFlag(argc, (const char **)argv, "device"))
    {
        devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
        cudaSetDevice(devID);
    }
    cudaError_t error;
    cudaDeviceProp deviceProp;
    error = cudaGetDevice(&devID);
    if (error != cudaSuccess)
    {
        printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
    }
    error = cudaGetDeviceProperties(&deviceProp, devID);
    // NOTE(review): deviceProp is inspected before the error code from
    // cudaGetDeviceProperties is checked; if the call failed this reads an
    // uninitialized struct — confirm ordering.
    if (deviceProp.computeMode == cudaComputeModeProhibited)
    {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
        exit(EXIT_SUCCESS);
    }
    if (error != cudaSuccess)
    {
        printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
    }
    else
    {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }
    // utilities
    cudaEvent_t start;
    cudaEvent_t stop;
    float msecTotal;
    // set seed for rand()
    srand(2006);
    // allocate host memory for matrices A and B
    unsigned int size_A = WA * HA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);
    unsigned int size_B = WB * HB;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*) malloc(mem_size_B);
    // 2*WC*HC*WA fused multiply-adds for one full C = A*B product.
    float flop = 2 * (float)WC * (float)HC * (float)WA;
    // initialize host memory
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);
    // allocate device memory
    float* d_A;
    cudaMalloc((void**) &d_A, mem_size_A);
    float* d_B;
    cudaMalloc((void**) &d_B, mem_size_B);
    // allocate device memory for result
    unsigned int size_C = WC * HC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* d_C;
    cudaMalloc((void**) &d_C, mem_size_C);
    // allocate host memory for the result
    float* h_C = (float*) malloc(mem_size_C);
#if CHECK_RESULT == 1
    // create and start timer
    cudaEventCreate(&start);
    cudaEventRecord(start, NULL);
    // compute reference solution on the CPU (golden result for printDiff)
    float* reference = (float*) malloc(mem_size_C);
    computeGold(reference, h_A, h_B, HA, WA, WB);
    // stop and destroy timer
    cudaEventCreate(&stop);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Naive CPU (Golden Reference)\n");
    printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#endif
    dim3 threads,grid;
    /****************************************************/
    /* CUDA SDK example */
    /****************************************************/
    // create and start timer
    cudaEventCreate(&start);
    cudaEventRecord(start, NULL);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B,
               cudaMemcpyHostToDevice);
    // setup execution parameters: one BLOCK_SIZE x BLOCK_SIZE block per
    // output tile (assumes WC and HC are multiples of BLOCK_SIZE)
    threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
    grid = dim3(WC / threads.x, HC / threads.y);
    // execute the kernel
    matrixMul<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,
               cudaMemcpyDeviceToHost);
    // stop and destroy timer
    cudaEventCreate(&stop);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU SDK Sample\n");
    printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
    // check result
    printDiff(reference, h_C, WC, HC);
#endif
    /****************************************************/
    /* naive implementation on GPU */
    /****************************************************/
#if ENABLE_NAIVE == 1
    // create and start timer
    cudaEventCreate(&start);
    cudaEventRecord(start, NULL);
    // setup execution parameters
    threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
    grid = dim3(WC / threads.x, HC / threads.y);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B,
               cudaMemcpyHostToDevice);
    // naive implementation
    matrixMul_naive<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,
               cudaMemcpyDeviceToHost);
    // stop and destroy timer
    cudaEventCreate(&stop);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Naive GPU\n");
    printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
    // check result
    printDiff(reference, h_C, WC, HC);
#endif
#endif
    /****************************************************/
    /* Tiling without global mem coalescing */
    /****************************************************/
    // create and start timer
    cudaEventCreate(&start);
    cudaEventRecord(start, NULL);
    // setup execution parameters
    threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
    grid = dim3(WC / threads.x, HC / threads.y);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B,
               cudaMemcpyHostToDevice);
    // shared-memory tiled implementation
    matrixMul_tiling<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,
               cudaMemcpyDeviceToHost);
    // stop and destroy timer
    cudaEventCreate(&stop);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Tiling GPU\n");
    printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
    // check result
    printDiff(reference, h_C, WC, HC);
#endif
    /****************************************************/
    /* Global mem coalescing with smem bank conflict */
    /****************************************************/
    // create and start timer
    cudaEventCreate(&start);
    cudaEventRecord(start, NULL);
    // setup execution parameters
    threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
    grid = dim3(WC / threads.x, HC / threads.y);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B,
               cudaMemcpyHostToDevice);
    // coalesced-global-memory implementation
    matrixMul_coalescing<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,
               cudaMemcpyDeviceToHost);
    // stop and destroy timer
    cudaEventCreate(&stop);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Global mem coalescing GPU\n");
    printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
    // check result
    printDiff(reference, h_C, WC, HC);
#endif
    /****************************************************/
    /* Global mem coalescing w/o smem bank conflict */
    /****************************************************/
    // create and start timer
    cudaEventCreate(&start);
    cudaEventRecord(start, NULL);
    // setup execution parameters
    threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
    grid = dim3(WC / threads.x, HC / threads.y);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B,
               cudaMemcpyHostToDevice);
    // bank-conflict-free shared-memory implementation
    matrixMul_noBankConflict<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,
               cudaMemcpyDeviceToHost);
    // stop and destroy timer
    cudaEventCreate(&stop);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Remove shared mem bank conflict GPU\n");
    printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
    // check result
    printDiff(reference, h_C, WC, HC);
#endif
    /****************************************************/
    /* Threads perform computation optimizatin */
    /****************************************************/
    // create and start timer
    cudaEventCreate(&start);
    cudaEventRecord(start, NULL);
    // setup execution parameters: each thread now produces several outputs,
    // so blocks are BLOCK_SIZE x 4 and the grid shrinks accordingly
    threads = dim3(BLOCK_SIZE, 4);
    grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B,
               cudaMemcpyHostToDevice);
    // per-thread multiple-output implementation
    matrixMul_compOpt<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,
               cudaMemcpyDeviceToHost);
    // stop and destroy timer
    cudaEventCreate(&stop);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Threads perform computation optimization GPU\n");
    printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
    // check result
    printDiff(reference, h_C, WC, HC);
#endif
    /****************************************************/
    /* Loop Unrolling */
    /****************************************************/
    // create and start timer
    cudaEventCreate(&start);
    cudaEventRecord(start, NULL);
    // setup execution parameters
    threads = dim3(BLOCK_SIZE, 4);
    grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B,
               cudaMemcpyHostToDevice);
    // unrolled-inner-loop implementation
    matrixMul_unroll<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,
               cudaMemcpyDeviceToHost);
    // stop and destroy timer
    cudaEventCreate(&stop);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Loop unrolling GPU\n");
    printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
    // check result
    printDiff(reference, h_C, WC, HC);
#endif
    /****************************************************/
    /* Prefetching */
    /****************************************************/
    // create and start timer
    cudaEventCreate(&start);
    cudaEventRecord(start, NULL);
    // setup execution parameters
    threads = dim3(BLOCK_SIZE, 4);
    grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B,
               cudaMemcpyHostToDevice);
    // double-buffered (prefetching) implementation
    matrixMul_prefetch<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,
               cudaMemcpyDeviceToHost);
    // stop and destroy timer
    cudaEventCreate(&stop);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Prefetching GPU\n");
    printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
    // check result
    printDiff(reference, h_C, WC, HC);
#endif
    /****************************************************/
    /* Cleaning */
    /****************************************************/
    // clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
#if CHECK_RESULT == 1
    free(reference);
#endif
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // NOTE(review): cudaThreadExit() is deprecated in favor of
    // cudaDeviceReset() — kept as-is here.
    cudaThreadExit();
}
// Fills `data` with `size` uniform pseudo-random floats in [0, 1], drawn in
// order from the C library rand() stream (the caller seeds it via srand()).
void randomInit(float* data, int size)
{
    float* end = data + size;

    for (float* p = data; p != end; ++p)
    {
        *p = rand() / (float)RAND_MAX;
    }
}
// Compares two row-major width x height matrices element by element,
// printing every pair that differs by more than 0.1 (column, row, CPU value,
// GPU value) followed by the total mismatch count.
void printDiff(float *data1, float *data2, int width, int height)
{
    int error_count = 0;

    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            int idx = row * width + col;

            if (fabs(data1[idx] - data2[idx]) > 0.1)
            {
                printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f \n", col, row, data1[idx], data2[idx]);
                error_count++;
            }
        }
    }

    printf("Total Errors = %d \n", error_count);
}
|
4e5ac260cba44e401c70f007816bf234b1cbbfba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "PQScanMultiPassPrecomputed.cuh"
#include "../GpuResources.h"
#include "PQCodeLoad.cuh"
#include "IVFUtils.cuh"
#include "../utils/ConversionOperators.cuh"
#include "../utils/DeviceTensor.cuh"
#include "../utils/DeviceUtils.h"
#include "../utils/Float16.cuh"
#include "../utils/LoadStoreOperators.cuh"
#include "../utils/MathOperators.cuh"
#include "../utils/StaticUtils.h"
#include <limits>
namespace faiss { namespace gpu {
// For precomputed codes, this calculates and loads code distances
// into smem: smem[i] = term2Start[i] + term3Start[i] for i in [0, numCodes).
// All threads of the block cooperate; the caller must __syncthreads() before
// reading `smem`. LookupVecT is the wide vector type (e.g. float4/Half8)
// used to batch loads when alignment allows.
template <typename LookupT, typename LookupVecT>
inline __device__ void
loadPrecomputedTerm(LookupT* smem,
                    LookupT* term2Start,
                    LookupT* term3Start,
                    int numCodes) {
  // Number of LookupT elements per wide vector word.
  constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);

  // We can only use vector loads if the data is guaranteed to be
  // aligned. The codes are innermost, so if it is evenly divisible,
  // then any slice will be aligned.
  if (numCodes % kWordSize == 0) {
    constexpr int kUnroll = 2;

    // Load the data by float4 for efficiency, and then handle any remainder
    // limitVec is the number of whole vec words we can load, in terms
    // of whole blocks performing the load
    int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
    limitVec *= kUnroll * blockDim.x;

    LookupVecT* smemV = (LookupVecT*) smem;
    LookupVecT* term2StartV = (LookupVecT*) term2Start;
    LookupVecT* term3StartV = (LookupVecT*) term3Start;

    // Vectorized main loop: each thread sums kUnroll vector words per pass.
    for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
      LookupVecT vals[kUnroll];

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        vals[j] =
          LoadStore<LookupVecT>::load(&term2StartV[i + j * blockDim.x]);
      }

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        LookupVecT q =
          LoadStore<LookupVecT>::load(&term3StartV[i + j * blockDim.x]);

        vals[j] = Math<LookupVecT>::add(vals[j], q);
      }

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        LoadStore<LookupVecT>::store(&smemV[i + j * blockDim.x], vals[j]);
      }
    }

    // This is where we start loading the remainder that does not evenly
    // fit into kUnroll x blockDim.x (scalar tail)
    int remainder = limitVec * kWordSize;

    for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
      smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
    }
  } else {
    // Potential unaligned load: scalar path only, unrolled by 4.
    constexpr int kUnroll = 4;

    int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);

    int i = threadIdx.x;
    for (; i < limit; i += kUnroll * blockDim.x) {
      LookupT vals[kUnroll];

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        vals[j] = term2Start[i + j * blockDim.x];
      }

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        vals[j] = Math<LookupT>::add(vals[j], term3Start[i + j * blockDim.x]);
      }

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        smem[i + j * blockDim.x] = vals[j];
      }
    }

    // Scalar tail for the last partial pass.
    for (; i < numCodes; i += blockDim.x) {
      smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
    }
  }
}
// Scans one (query, probe) pair per block: block (x=probe, y=query).
// Computes, for every code in the probed inverted list, the precomputed-term
// distance term1 + sum_subq(term2 + term3), writing raw distances to the
// slice of `distance` given by prefixSumOffsets. Dynamic shared memory must
// hold numSubQuantizers * codesPerSubQuantizer LookupT values.
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void
pqScanPrecomputedMultiPass(Tensor<float, 2, true> queries,
                           Tensor<float, 2, true> precompTerm1,
                           Tensor<LookupT, 3, true> precompTerm2,
                           Tensor<LookupT, 3, true> precompTerm3,
                           Tensor<int, 2, true> topQueryToCentroid,
                           void** listCodes,
                           int* listLengths,
                           Tensor<int, 2, true> prefixSumOffsets,
                           Tensor<float, 1, true> distance) {
  // precomputed term 2 + 3 storage
  // (sub q)(code id)
  extern __shared__ char smemTerm23[];
  LookupT* term23 = (LookupT*) smemTerm23;

  // Each block handles a single query
  auto queryId = blockIdx.y;
  auto probeId = blockIdx.x;
  auto codesPerSubQuantizer = precompTerm2.getSize(2);
  auto precompTermSize = precompTerm2.getSize(1) * codesPerSubQuantizer;

  // This is where we start writing out data
  // We ensure that before the array (at offset -1), there is a 0 value
  int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
  float* distanceOut = distance[outBase].data();

  auto listId = topQueryToCentroid[queryId][probeId];
  // Safety guard in case NaNs in input cause no list ID to be generated
  if (listId == -1) {
    return;
  }

  unsigned char* codeList = (unsigned char*) listCodes[listId];
  int limit = listLengths[listId];

  // Number of 32-bit words holding each vector's PQ code bytes.
  constexpr int kNumCode32 = NumSubQuantizers <= 4 ? 1 :
    (NumSubQuantizers / 4);
  unsigned int code32[kNumCode32];
  unsigned int nextCode32[kNumCode32];

  // We double-buffer the code loading, which improves memory utilization
  if (threadIdx.x < limit) {
    LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
  }

  // Load precomputed terms 1, 2, 3
  float term1 = precompTerm1[queryId][probeId];
  loadPrecomputedTerm<LookupT, LookupVecT>(term23,
                                           precompTerm2[listId].data(),
                                           precompTerm3[queryId].data(),
                                           precompTermSize);

  // Prevent WAR dependencies
  __syncthreads();

  // Each thread handles one code element in the list, with a
  // block-wide stride
  for (int codeIndex = threadIdx.x;
       codeIndex < limit;
       codeIndex += blockDim.x) {
    // Prefetch next codes
    if (codeIndex + blockDim.x < limit) {
      LoadCode32<NumSubQuantizers>::load(
        nextCode32, codeList, codeIndex + blockDim.x);
    }

    float dist = term1;

#pragma unroll
    for (int word = 0; word < kNumCode32; ++word) {
      constexpr int kBytesPerCode32 =
        NumSubQuantizers < 4 ? NumSubQuantizers : 4;

      if (kBytesPerCode32 == 1) {
        // NOTE(review): this single-byte path assigns (dist =) rather than
        // accumulating (dist +=), so term1 is discarded — inconsistent with
        // the multi-byte branch below; confirm against upstream intent.
        auto code = code32[0];
        dist = ConvertTo<float>::to(term23[code]);

      } else {
#pragma unroll
        for (int byte = 0; byte < kBytesPerCode32; ++byte) {
          auto code = getByte(code32[word], byte * 8, 8);

          auto offset =
            codesPerSubQuantizer * (word * kBytesPerCode32 + byte);

          dist += ConvertTo<float>::to(term23[offset + code]);
        }
      }
    }

    // Write out intermediate distance result
    // We do not maintain indices here, in order to reduce global
    // memory traffic. Those are recovered in the final selection step.
    distanceOut[codeIndex] = dist;

    // Rotate buffers
#pragma unroll
    for (int word = 0; word < kNumCode32; ++word) {
      code32[word] = nextCode32[word];
    }
  }
}
// Runs one tile of the multi-pass precomputed PQ scan on `stream`:
// 1) computes per-(query, probe) output offsets,
// 2) launches pqScanPrecomputedMultiPass (dispatched on bytesPerCode and
//    float16-vs-float32 lookup tables) to fill `allDistances`,
// 3) runs the two-pass k-selection to produce outDistances/outIndices.
// Shared-memory demand is numSubQuantizers * numSubQuantizerCodes lookup
// entries and is asserted to fit the device limit.
void
runMultiPassTile(Tensor<float, 2, true>& queries,
                 Tensor<float, 2, true>& precompTerm1,
                 NoTypeTensor<3, true>& precompTerm2,
                 NoTypeTensor<3, true>& precompTerm3,
                 Tensor<int, 2, true>& topQueryToCentroid,
                 bool useFloat16Lookup,
                 int bytesPerCode,
                 int numSubQuantizers,
                 int numSubQuantizerCodes,
                 thrust::device_vector<void*>& listCodes,
                 thrust::device_vector<void*>& listIndices,
                 IndicesOptions indicesOptions,
                 thrust::device_vector<int>& listLengths,
                 Tensor<char, 1, true>& thrustMem,
                 Tensor<int, 2, true>& prefixSumOffsets,
                 Tensor<float, 1, true>& allDistances,
                 Tensor<float, 3, true>& heapDistances,
                 Tensor<int, 3, true>& heapIndices,
                 int k,
                 Tensor<float, 2, true>& outDistances,
                 Tensor<long, 2, true>& outIndices,
                 hipStream_t stream) {
  // Calculate offset lengths, so we know where to write out
  // intermediate results
  runCalcListOffsets(topQueryToCentroid, listLengths, prefixSumOffsets,
                     thrustMem, stream);

  // Convert all codes to a distance, and write out (distance,
  // index) values for all intermediate results
  {
    auto kThreadsPerBlock = 256;

    // grid: (nprobe, numQueries); one block per (query, probe) pair.
    auto grid = dim3(topQueryToCentroid.getSize(1),
                     topQueryToCentroid.getSize(0));
    auto block = dim3(kThreadsPerBlock);

    // pq precomputed terms (2 + 3)
    auto smem = sizeof(float);
#ifdef FAISS_USE_FLOAT16
    if (useFloat16Lookup) {
      smem = sizeof(half);
    }
#endif
    smem *= numSubQuantizers * numSubQuantizerCodes;
    FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());

// Instantiates and launches the kernel for one (sub-quantizer count,
// lookup type) combination.
#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T)                   \
    do {                                                                \
      auto precompTerm2T = precompTerm2.toTensor<LOOKUP_T>();           \
      auto precompTerm3T = precompTerm3.toTensor<LOOKUP_T>();           \
                                                                        \
      hipLaunchKernelGGL(( pqScanPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T>) \
        , dim3(grid), dim3(block), smem, stream,                        \
          queries,                                                      \
          precompTerm1,                                                 \
          precompTerm2T,                                                \
          precompTerm3T,                                                \
          topQueryToCentroid,                                           \
          listCodes.data().get(),                                       \
          listLengths.data().get(),                                     \
          prefixSumOffsets,                                             \
          allDistances);                                                \
    } while (0)

// Chooses half vs float lookup tables when float16 support is compiled in.
#ifdef FAISS_USE_FLOAT16
#define RUN_PQ(NUM_SUB_Q)                       \
    do {                                        \
      if (useFloat16Lookup) {                   \
        RUN_PQ_OPT(NUM_SUB_Q, half, Half8);     \
      } else {                                  \
        RUN_PQ_OPT(NUM_SUB_Q, float, float4);   \
      }                                         \
    } while (0)
#else
#define RUN_PQ(NUM_SUB_Q)                       \
    do {                                        \
      RUN_PQ_OPT(NUM_SUB_Q, float, float4);     \
    } while (0)
#endif // FAISS_USE_FLOAT16

    // Dispatch on the compile-time supported code sizes (bytes per vector).
    switch (bytesPerCode) {
      case 1:
        RUN_PQ(1);
        break;
      case 2:
        RUN_PQ(2);
        break;
      case 3:
        RUN_PQ(3);
        break;
      case 4:
        RUN_PQ(4);
        break;
      case 8:
        RUN_PQ(8);
        break;
      case 12:
        RUN_PQ(12);
        break;
      case 16:
        RUN_PQ(16);
        break;
      case 20:
        RUN_PQ(20);
        break;
      case 24:
        RUN_PQ(24);
        break;
      case 28:
        RUN_PQ(28);
        break;
      case 32:
        RUN_PQ(32);
        break;
      case 40:
        RUN_PQ(40);
        break;
      case 48:
        RUN_PQ(48);
        break;
      case 56:
        RUN_PQ(56);
        break;
      case 64:
        RUN_PQ(64);
        break;
      case 96:
        RUN_PQ(96);
        break;
      default:
        // Unsupported code size: hard failure.
        FAISS_ASSERT(false);
        break;
    }

    CUDA_TEST_ERROR();

#undef RUN_PQ
#undef RUN_PQ_OPT
  }

  // k-select the output in chunks, to increase parallelism
  runPass1SelectLists(prefixSumOffsets,
                      allDistances,
                      topQueryToCentroid.getSize(1),
                      k,
                      false, // L2 distance chooses smallest
                      heapDistances,
                      heapIndices,
                      stream);

  // k-select final output
  auto flatHeapDistances = heapDistances.downcastInner<2>();
  auto flatHeapIndices = heapIndices.downcastInner<2>();

  runPass2SelectLists(flatHeapDistances,
                      flatHeapIndices,
                      listIndices,
                      indicesOptions,
                      prefixSumOffsets,
                      topQueryToCentroid,
                      k,
                      false, // L2 distance chooses smallest
                      outDistances,
                      outIndices,
                      stream);

  CUDA_TEST_ERROR();
}
// Top-level driver for the precomputed-term PQ scan: sizes a query tile to
// fit the temporary-memory budget, allocates double-buffered scratch
// (prefix-sum offsets, raw distances, two-pass heaps), then processes the
// queries tile by tile, alternating between two streams for overlap.
void runPQScanMultiPassPrecomputed(Tensor<float, 2, true>& queries,
                                   Tensor<float, 2, true>& precompTerm1,
                                   NoTypeTensor<3, true>& precompTerm2,
                                   NoTypeTensor<3, true>& precompTerm3,
                                   Tensor<int, 2, true>& topQueryToCentroid,
                                   bool useFloat16Lookup,
                                   int bytesPerCode,
                                   int numSubQuantizers,
                                   int numSubQuantizerCodes,
                                   thrust::device_vector<void*>& listCodes,
                                   thrust::device_vector<void*>& listIndices,
                                   IndicesOptions indicesOptions,
                                   thrust::device_vector<int>& listLengths,
                                   int maxListLength,
                                   int k,
                                   // output
                                   Tensor<float, 2, true>& outDistances,
                                   // output
                                   Tensor<long, 2, true>& outIndices,
                                   GpuResources* res) {
  // Bounds for the adaptive per-tile query count.
  constexpr int kMinQueryTileSize = 8;
  constexpr int kMaxQueryTileSize = 128;
  // Scratch reserved for Thrust's internal reductions.
  constexpr int kThrustMemSize = 16384;

  int nprobe = topQueryToCentroid.getSize(1);

  auto& mem = res->getMemoryManagerCurrentDevice();
  auto stream = res->getDefaultStreamCurrentDevice();

  // Make a reservation for Thrust to do its dirty work (global memory
  // cross-block reduction space); hopefully this is large enough.
  DeviceTensor<char, 1, true> thrustMem1(
    mem, {kThrustMemSize}, stream);
  DeviceTensor<char, 1, true> thrustMem2(
    mem, {kThrustMemSize}, stream);
  DeviceTensor<char, 1, true>* thrustMem[2] =
    {&thrustMem1, &thrustMem2};

  // How much temporary storage is available?
  // If possible, we'd like to fit within the space available.
  size_t sizeAvailable = mem.getSizeAvailable();

  // We run two passes of heap selection
  // This is the size of the first-level heap passes
  constexpr int kNProbeSplit = 8;
  int pass2Chunks = std::min(nprobe, kNProbeSplit);

  size_t sizeForFirstSelectPass =
    pass2Chunks * k * (sizeof(float) + sizeof(int));

  // How much temporary storage we need per each query
  size_t sizePerQuery =
    2 * // # streams
    ((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
     nprobe * maxListLength * sizeof(float) + // allDistances
     sizeForFirstSelectPass);

  // Clamp the tile size derived from the budget into [min, max].
  int queryTileSize = (int) (sizeAvailable / sizePerQuery);

  if (queryTileSize < kMinQueryTileSize) {
    queryTileSize = kMinQueryTileSize;
  } else if (queryTileSize > kMaxQueryTileSize) {
    queryTileSize = kMaxQueryTileSize;
  }

  // FIXME: we should adjust queryTileSize to deal with this, since
  // indexing is in int32
  FAISS_ASSERT(queryTileSize * nprobe * maxListLength <=
               std::numeric_limits<int>::max());

  // Temporary memory buffers
  // Make sure there is space prior to the start which will be 0, and
  // will handle the boundary condition without branches
  DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
    mem, {queryTileSize * nprobe + 1}, stream);
  DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
    mem, {queryTileSize * nprobe + 1}, stream);

  // Views starting at element 1 so index -1 is always the leading zero.
  DeviceTensor<int, 2, true> prefixSumOffsets1(
    prefixSumOffsetSpace1[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true> prefixSumOffsets2(
    prefixSumOffsetSpace2[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
    {&prefixSumOffsets1, &prefixSumOffsets2};

  // Make sure the element before prefixSumOffsets is 0, since we
  // depend upon simple, boundary-less indexing to get proper results
  CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace1.data(),
                             0,
                             sizeof(int),
                             stream));
  CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace2.data(),
                             0,
                             sizeof(int),
                             stream));

  DeviceTensor<float, 1, true> allDistances1(
    mem, {queryTileSize * nprobe * maxListLength}, stream);
  DeviceTensor<float, 1, true> allDistances2(
    mem, {queryTileSize * nprobe * maxListLength}, stream);
  DeviceTensor<float, 1, true>* allDistances[2] =
    {&allDistances1, &allDistances2};

  DeviceTensor<float, 3, true> heapDistances1(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<float, 3, true> heapDistances2(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<float, 3, true>* heapDistances[2] =
    {&heapDistances1, &heapDistances2};

  DeviceTensor<int, 3, true> heapIndices1(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<int, 3, true> heapIndices2(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<int, 3, true>* heapIndices[2] =
    {&heapIndices1, &heapIndices2};

  // Alternate streams must wait for prior work on the default stream.
  auto streams = res->getAlternateStreamsCurrentDevice();
  streamWait(streams, {stream});

  int curStream = 0;

  // Process queries one tile at a time, ping-ponging between the two
  // scratch-buffer/stream pairs.
  for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
    int numQueriesInTile =
      std::min(queryTileSize, queries.getSize(0) - query);

    auto prefixSumOffsetsView =
      prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);

    auto coarseIndicesView =
      topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
    auto queryView =
      queries.narrowOutermost(query, numQueriesInTile);
    auto term1View =
      precompTerm1.narrowOutermost(query, numQueriesInTile);
    auto term3View =
      precompTerm3.narrowOutermost(query, numQueriesInTile);

    auto heapDistancesView =
      heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
    auto heapIndicesView =
      heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);

    auto outDistanceView =
      outDistances.narrowOutermost(query, numQueriesInTile);
    auto outIndicesView =
      outIndices.narrowOutermost(query, numQueriesInTile);

    runMultiPassTile(queryView,
                     term1View,
                     precompTerm2,
                     term3View,
                     coarseIndicesView,
                     useFloat16Lookup,
                     bytesPerCode,
                     numSubQuantizers,
                     numSubQuantizerCodes,
                     listCodes,
                     listIndices,
                     indicesOptions,
                     listLengths,
                     *thrustMem[curStream],
                     prefixSumOffsetsView,
                     *allDistances[curStream],
                     heapDistancesView,
                     heapIndicesView,
                     k,
                     outDistanceView,
                     outIndicesView,
                     streams[curStream]);

    curStream = (curStream + 1) % 2;
  }

  // Re-join the alternate streams with the default stream before returning.
  streamWait({stream}, streams);
}
} } // namespace
| 4e5ac260cba44e401c70f007816bf234b1cbbfba.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "PQScanMultiPassPrecomputed.cuh"
#include "../GpuResources.h"
#include "PQCodeLoad.cuh"
#include "IVFUtils.cuh"
#include "../utils/ConversionOperators.cuh"
#include "../utils/DeviceTensor.cuh"
#include "../utils/DeviceUtils.h"
#include "../utils/Float16.cuh"
#include "../utils/LoadStoreOperators.cuh"
#include "../utils/MathOperators.cuh"
#include "../utils/StaticUtils.h"
#include <limits>
namespace faiss { namespace gpu {
// For precomputed codes, this calculates and loads code distances
// into smem: smem[i] = term2Start[i] + term3Start[i] for i in [0, numCodes).
// All threads of the block cooperate; the caller must __syncthreads() before
// reading `smem`. LookupVecT is the wide vector type (e.g. float4/Half8)
// used to batch loads when alignment allows.
template <typename LookupT, typename LookupVecT>
inline __device__ void
loadPrecomputedTerm(LookupT* smem,
                    LookupT* term2Start,
                    LookupT* term3Start,
                    int numCodes) {
  // Number of LookupT elements per wide vector word.
  constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);

  // We can only use vector loads if the data is guaranteed to be
  // aligned. The codes are innermost, so if it is evenly divisible,
  // then any slice will be aligned.
  if (numCodes % kWordSize == 0) {
    constexpr int kUnroll = 2;

    // Load the data by float4 for efficiency, and then handle any remainder
    // limitVec is the number of whole vec words we can load, in terms
    // of whole blocks performing the load
    int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
    limitVec *= kUnroll * blockDim.x;

    LookupVecT* smemV = (LookupVecT*) smem;
    LookupVecT* term2StartV = (LookupVecT*) term2Start;
    LookupVecT* term3StartV = (LookupVecT*) term3Start;

    // Vectorized main loop: each thread sums kUnroll vector words per pass.
    for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
      LookupVecT vals[kUnroll];

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        vals[j] =
          LoadStore<LookupVecT>::load(&term2StartV[i + j * blockDim.x]);
      }

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        LookupVecT q =
          LoadStore<LookupVecT>::load(&term3StartV[i + j * blockDim.x]);

        vals[j] = Math<LookupVecT>::add(vals[j], q);
      }

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        LoadStore<LookupVecT>::store(&smemV[i + j * blockDim.x], vals[j]);
      }
    }

    // This is where we start loading the remainder that does not evenly
    // fit into kUnroll x blockDim.x (scalar tail)
    int remainder = limitVec * kWordSize;

    for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
      smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
    }
  } else {
    // Potential unaligned load: scalar path only, unrolled by 4.
    constexpr int kUnroll = 4;

    int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);

    int i = threadIdx.x;
    for (; i < limit; i += kUnroll * blockDim.x) {
      LookupT vals[kUnroll];

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        vals[j] = term2Start[i + j * blockDim.x];
      }

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        vals[j] = Math<LookupT>::add(vals[j], term3Start[i + j * blockDim.x]);
      }

#pragma unroll
      for (int j = 0; j < kUnroll; ++j) {
        smem[i + j * blockDim.x] = vals[j];
      }
    }

    // Scalar tail for the last partial pass.
    for (; i < numCodes; i += blockDim.x) {
      smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
    }
  }
}
// Scans one (query, probe) pair per block: block (x=probe, y=query).
// Computes, for every code in the probed inverted list, the precomputed-term
// distance term1 + sum_subq(term2 + term3), writing raw distances to the
// slice of `distance` given by prefixSumOffsets. Dynamic shared memory must
// hold numSubQuantizers * codesPerSubQuantizer LookupT values.
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void
pqScanPrecomputedMultiPass(Tensor<float, 2, true> queries,
                           Tensor<float, 2, true> precompTerm1,
                           Tensor<LookupT, 3, true> precompTerm2,
                           Tensor<LookupT, 3, true> precompTerm3,
                           Tensor<int, 2, true> topQueryToCentroid,
                           void** listCodes,
                           int* listLengths,
                           Tensor<int, 2, true> prefixSumOffsets,
                           Tensor<float, 1, true> distance) {
  // precomputed term 2 + 3 storage
  // (sub q)(code id)
  extern __shared__ char smemTerm23[];
  LookupT* term23 = (LookupT*) smemTerm23;

  // Each block handles a single query
  auto queryId = blockIdx.y;
  auto probeId = blockIdx.x;
  auto codesPerSubQuantizer = precompTerm2.getSize(2);
  auto precompTermSize = precompTerm2.getSize(1) * codesPerSubQuantizer;

  // This is where we start writing out data
  // We ensure that before the array (at offset -1), there is a 0 value
  int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
  float* distanceOut = distance[outBase].data();

  auto listId = topQueryToCentroid[queryId][probeId];
  // Safety guard in case NaNs in input cause no list ID to be generated
  if (listId == -1) {
    return;
  }

  unsigned char* codeList = (unsigned char*) listCodes[listId];
  int limit = listLengths[listId];

  // Number of 32-bit words holding each vector's PQ code bytes.
  constexpr int kNumCode32 = NumSubQuantizers <= 4 ? 1 :
    (NumSubQuantizers / 4);
  unsigned int code32[kNumCode32];
  unsigned int nextCode32[kNumCode32];

  // We double-buffer the code loading, which improves memory utilization
  if (threadIdx.x < limit) {
    LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
  }

  // Load precomputed terms 1, 2, 3
  float term1 = precompTerm1[queryId][probeId];
  loadPrecomputedTerm<LookupT, LookupVecT>(term23,
                                           precompTerm2[listId].data(),
                                           precompTerm3[queryId].data(),
                                           precompTermSize);

  // Prevent WAR dependencies
  __syncthreads();

  // Each thread handles one code element in the list, with a
  // block-wide stride
  for (int codeIndex = threadIdx.x;
       codeIndex < limit;
       codeIndex += blockDim.x) {
    // Prefetch next codes
    if (codeIndex + blockDim.x < limit) {
      LoadCode32<NumSubQuantizers>::load(
        nextCode32, codeList, codeIndex + blockDim.x);
    }

    float dist = term1;

#pragma unroll
    for (int word = 0; word < kNumCode32; ++word) {
      constexpr int kBytesPerCode32 =
        NumSubQuantizers < 4 ? NumSubQuantizers : 4;

      if (kBytesPerCode32 == 1) {
        // NOTE(review): this single-byte path assigns (dist =) rather than
        // accumulating (dist +=), so term1 is discarded — inconsistent with
        // the multi-byte branch below; confirm against upstream intent.
        auto code = code32[0];
        dist = ConvertTo<float>::to(term23[code]);

      } else {
#pragma unroll
        for (int byte = 0; byte < kBytesPerCode32; ++byte) {
          auto code = getByte(code32[word], byte * 8, 8);

          auto offset =
            codesPerSubQuantizer * (word * kBytesPerCode32 + byte);

          dist += ConvertTo<float>::to(term23[offset + code]);
        }
      }
    }

    // Write out intermediate distance result
    // We do not maintain indices here, in order to reduce global
    // memory traffic. Those are recovered in the final selection step.
    distanceOut[codeIndex] = dist;

    // Rotate buffers
#pragma unroll
    for (int word = 0; word < kNumCode32; ++word) {
      code32[word] = nextCode32[word];
    }
  }
}
void
runMultiPassTile(Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices,
cudaStream_t stream) {
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(topQueryToCentroid, listLengths, prefixSumOffsets,
thrustMem, stream);
// Convert all codes to a distance, and write out (distance,
// index) values for all intermediate results
{
auto kThreadsPerBlock = 256;
auto grid = dim3(topQueryToCentroid.getSize(1),
topQueryToCentroid.getSize(0));
auto block = dim3(kThreadsPerBlock);
// pq precomputed terms (2 + 3)
auto smem = sizeof(float);
#ifdef FAISS_USE_FLOAT16
if (useFloat16Lookup) {
smem = sizeof(half);
}
#endif
smem *= numSubQuantizers * numSubQuantizerCodes;
FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());
#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T) \
do { \
auto precompTerm2T = precompTerm2.toTensor<LOOKUP_T>(); \
auto precompTerm3T = precompTerm3.toTensor<LOOKUP_T>(); \
\
pqScanPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T> \
<<<grid, block, smem, stream>>>( \
queries, \
precompTerm1, \
precompTerm2T, \
precompTerm3T, \
topQueryToCentroid, \
listCodes.data().get(), \
listLengths.data().get(), \
prefixSumOffsets, \
allDistances); \
} while (0)
#ifdef FAISS_USE_FLOAT16
#define RUN_PQ(NUM_SUB_Q) \
do { \
if (useFloat16Lookup) { \
RUN_PQ_OPT(NUM_SUB_Q, half, Half8); \
} else { \
RUN_PQ_OPT(NUM_SUB_Q, float, float4); \
} \
} while (0)
#else
#define RUN_PQ(NUM_SUB_Q) \
do { \
RUN_PQ_OPT(NUM_SUB_Q, float, float4); \
} while (0)
#endif // FAISS_USE_FLOAT16
switch (bytesPerCode) {
case 1:
RUN_PQ(1);
break;
case 2:
RUN_PQ(2);
break;
case 3:
RUN_PQ(3);
break;
case 4:
RUN_PQ(4);
break;
case 8:
RUN_PQ(8);
break;
case 12:
RUN_PQ(12);
break;
case 16:
RUN_PQ(16);
break;
case 20:
RUN_PQ(20);
break;
case 24:
RUN_PQ(24);
break;
case 28:
RUN_PQ(28);
break;
case 32:
RUN_PQ(32);
break;
case 40:
RUN_PQ(40);
break;
case 48:
RUN_PQ(48);
break;
case 56:
RUN_PQ(56);
break;
case 64:
RUN_PQ(64);
break;
case 96:
RUN_PQ(96);
break;
default:
FAISS_ASSERT(false);
break;
}
CUDA_TEST_ERROR();
#undef RUN_PQ
#undef RUN_PQ_OPT
}
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
topQueryToCentroid.getSize(1),
k,
false, // L2 distance chooses smallest
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
topQueryToCentroid,
k,
false, // L2 distance chooses smallest
outDistances,
outIndices,
stream);
CUDA_TEST_ERROR();
}
void runPQScanMultiPassPrecomputed(Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& precompTerm1,
NoTypeTensor<3, true>& precompTerm2,
NoTypeTensor<3, true>& precompTerm3,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<long, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = topQueryToCentroid.getSize(1);
auto& mem = res->getMemoryManagerCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true> thrustMem2(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
size_t sizeAvailable = mem.getSizeAvailable();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = std::min(nprobe, kNProbeSplit);
size_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
// How much temporary storage we need per each query
size_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <=
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true> allDistances2(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true> heapDistances2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true> heapIndices2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
std::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto coarseIndicesView =
topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto term1View =
precompTerm1.narrowOutermost(query, numQueriesInTile);
auto term3View =
precompTerm3.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runMultiPassTile(queryView,
term1View,
precompTerm2,
term3View,
coarseIndicesView,
useFloat16Lookup,
bytesPerCode,
numSubQuantizers,
numSubQuantizerCodes,
listCodes,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
} } // namespace
|
62311dae47155a4f48f18254f2ec7ead289931f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "searchsorted_cuda_kernel.h"
template <typename scalar_t>
__device__
int eval(scalar_t val, scalar_t *a, int64_t row, int64_t col, int64_t ncol, bool side_left)
{
/* Evaluates whether a[row,col] < val <= a[row, col+1]*/
if (col == ncol - 1)
{
// special case: we are on the right border
if (a[row * ncol + col] <= val){
return 1;}
else {
return -1;}
}
bool is_lower;
bool is_next_higher;
if (side_left) {
// a[row, col] < v <= a[row, col+1]
is_lower = (a[row * ncol + col] < val);
is_next_higher = (a[row*ncol + col + 1] >= val);
} else {
// a[row, col] <= v < a[row, col+1]
is_lower = (a[row * ncol + col] <= val);
is_next_higher = (a[row * ncol + col + 1] > val);
}
if (is_lower && is_next_higher) {
// we found the right spot
return 0;
} else if (is_lower) {
// answer is on the right side
return 1;
} else {
// answer is on the left side
return -1;
}
}
template <typename scalar_t>
__device__
int binary_search(scalar_t *a, int64_t row, scalar_t val, int64_t ncol, bool side_left)
{
/* Look for the value `val` within row `row` of matrix `a`, which
has `ncol` columns.
the `a` matrix is assumed sorted in increasing order, row-wise
Returns
* -1 if `val` is smaller than the smallest value found within that row of `a`
* `ncol` - 1 if `val` is larger than the largest element of that row of `a`
* Otherwise, return the column index `res` such that:
- a[row, col] < val <= a[row, col+1]. (if side_left), or
- a[row, col] < val <= a[row, col+1] (if not side_left).
*/
//start with left at 0 and right at number of columns of a
int64_t right = ncol;
int64_t left = 0;
while (right >= left) {
// take the midpoint of current left and right cursors
int64_t mid = left + (right-left)/2;
// check the relative position of val: are we good here ?
int rel_pos = eval(val, a, row, mid, ncol, side_left);
// we found the point
if(rel_pos == 0) {
return mid;
} else if (rel_pos > 0) {
if (mid==ncol-1){return ncol-1;}
// the answer is on the right side
left = mid;
} else {
if (mid==0){return -1;}
right = mid;
}
}
return -1;
}
template <typename scalar_t>
__global__
void searchsorted_kernel(
int64_t *res,
scalar_t *a,
scalar_t *v,
int64_t nrow_res, int64_t nrow_a, int64_t nrow_v, int64_t ncol_a, int64_t ncol_v, bool side_left)
{
// get current row and column
int64_t row = blockIdx.y*blockDim.y+threadIdx.y;
int64_t col = blockIdx.x*blockDim.x+threadIdx.x;
// check whether we are outside the bounds of what needs be computed.
if ((row >= nrow_res) || (col >= ncol_v)) {
return;}
// get the value to look for
int64_t row_in_v = (nrow_v==1) ? 0: row;
int64_t row_in_a = (nrow_a==1) ? 0: row;
int64_t idx_in_v = row_in_v*ncol_v+col;
int64_t idx_in_res = row*ncol_v+col;
// apply binary search
res[idx_in_res] = binary_search(a, row_in_a, v[idx_in_v], ncol_a, side_left)+1;
}
void searchsorted_cuda(
at::Tensor a,
at::Tensor v,
at::Tensor res,
bool side_left){
// Get the dimensions
auto nrow_a = a.size(/*dim=*/0);
auto nrow_v = v.size(/*dim=*/0);
auto ncol_a = a.size(/*dim=*/1);
auto ncol_v = v.size(/*dim=*/1);
auto nrow_res = fmax(double(nrow_a), double(nrow_v));
// prepare the kernel configuration
dim3 threads(ncol_v, nrow_res);
dim3 blocks(1, 1);
if (nrow_res*ncol_v > 1024){
threads.x = int(fmin(double(1024), double(ncol_v)));
threads.y = floor(1024/threads.x);
blocks.x = ceil(double(ncol_v)/double(threads.x));
blocks.y = ceil(double(nrow_res)/double(threads.y));
}
AT_DISPATCH_ALL_TYPES(a.scalar_type(), "searchsorted cuda", ([&] {
hipLaunchKernelGGL(( searchsorted_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
res.data_ptr<int64_t>(),
a.data_ptr<scalar_t>(),
v.data_ptr<scalar_t>(),
nrow_res, nrow_a, nrow_v, ncol_a, ncol_v, side_left);
}));
}
| 62311dae47155a4f48f18254f2ec7ead289931f9.cu | #include "searchsorted_cuda_kernel.h"
template <typename scalar_t>
__device__
int eval(scalar_t val, scalar_t *a, int64_t row, int64_t col, int64_t ncol, bool side_left)
{
/* Evaluates whether a[row,col] < val <= a[row, col+1]*/
if (col == ncol - 1)
{
// special case: we are on the right border
if (a[row * ncol + col] <= val){
return 1;}
else {
return -1;}
}
bool is_lower;
bool is_next_higher;
if (side_left) {
// a[row, col] < v <= a[row, col+1]
is_lower = (a[row * ncol + col] < val);
is_next_higher = (a[row*ncol + col + 1] >= val);
} else {
// a[row, col] <= v < a[row, col+1]
is_lower = (a[row * ncol + col] <= val);
is_next_higher = (a[row * ncol + col + 1] > val);
}
if (is_lower && is_next_higher) {
// we found the right spot
return 0;
} else if (is_lower) {
// answer is on the right side
return 1;
} else {
// answer is on the left side
return -1;
}
}
template <typename scalar_t>
__device__
int binary_search(scalar_t *a, int64_t row, scalar_t val, int64_t ncol, bool side_left)
{
/* Look for the value `val` within row `row` of matrix `a`, which
has `ncol` columns.
the `a` matrix is assumed sorted in increasing order, row-wise
Returns
* -1 if `val` is smaller than the smallest value found within that row of `a`
* `ncol` - 1 if `val` is larger than the largest element of that row of `a`
* Otherwise, return the column index `res` such that:
- a[row, col] < val <= a[row, col+1]. (if side_left), or
- a[row, col] < val <= a[row, col+1] (if not side_left).
*/
//start with left at 0 and right at number of columns of a
int64_t right = ncol;
int64_t left = 0;
while (right >= left) {
// take the midpoint of current left and right cursors
int64_t mid = left + (right-left)/2;
// check the relative position of val: are we good here ?
int rel_pos = eval(val, a, row, mid, ncol, side_left);
// we found the point
if(rel_pos == 0) {
return mid;
} else if (rel_pos > 0) {
if (mid==ncol-1){return ncol-1;}
// the answer is on the right side
left = mid;
} else {
if (mid==0){return -1;}
right = mid;
}
}
return -1;
}
template <typename scalar_t>
__global__
void searchsorted_kernel(
int64_t *res,
scalar_t *a,
scalar_t *v,
int64_t nrow_res, int64_t nrow_a, int64_t nrow_v, int64_t ncol_a, int64_t ncol_v, bool side_left)
{
// get current row and column
int64_t row = blockIdx.y*blockDim.y+threadIdx.y;
int64_t col = blockIdx.x*blockDim.x+threadIdx.x;
// check whether we are outside the bounds of what needs be computed.
if ((row >= nrow_res) || (col >= ncol_v)) {
return;}
// get the value to look for
int64_t row_in_v = (nrow_v==1) ? 0: row;
int64_t row_in_a = (nrow_a==1) ? 0: row;
int64_t idx_in_v = row_in_v*ncol_v+col;
int64_t idx_in_res = row*ncol_v+col;
// apply binary search
res[idx_in_res] = binary_search(a, row_in_a, v[idx_in_v], ncol_a, side_left)+1;
}
void searchsorted_cuda(
at::Tensor a,
at::Tensor v,
at::Tensor res,
bool side_left){
// Get the dimensions
auto nrow_a = a.size(/*dim=*/0);
auto nrow_v = v.size(/*dim=*/0);
auto ncol_a = a.size(/*dim=*/1);
auto ncol_v = v.size(/*dim=*/1);
auto nrow_res = fmax(double(nrow_a), double(nrow_v));
// prepare the kernel configuration
dim3 threads(ncol_v, nrow_res);
dim3 blocks(1, 1);
if (nrow_res*ncol_v > 1024){
threads.x = int(fmin(double(1024), double(ncol_v)));
threads.y = floor(1024/threads.x);
blocks.x = ceil(double(ncol_v)/double(threads.x));
blocks.y = ceil(double(nrow_res)/double(threads.y));
}
AT_DISPATCH_ALL_TYPES(a.scalar_type(), "searchsorted cuda", ([&] {
searchsorted_kernel<scalar_t><<<blocks, threads>>>(
res.data_ptr<int64_t>(),
a.data_ptr<scalar_t>(),
v.data_ptr<scalar_t>(),
nrow_res, nrow_a, nrow_v, ncol_a, ncol_v, side_left);
}));
}
|
79d8466a1d355477bcc1cd5c30c6733d13ab7ef0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* @Author: X Wang, Y xiao, Ch Yang, G Ye
* @Date: 2019-06-17 01:03:01
* @Last Modified by: X Wang, Y Xiao, Ch Yang, G Ye
* @Last Modified time: 2019-06-17 10:51:30
* @file description:
blur image using gauss kernel
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "blur.h"
#include "common.h"
#define PI 3.141593
// The kernel of gauss using bluring img;
// And the size of gauss kernel is in [1, 8];
__constant__ float gauss_kernel[2500];
/*
S
*/
__global__ void kernel_blur(int S, int img_height, int img_width, int *res_img,
const int *__restrict__ src_img) {
int block_id = blockIdx.y * gridDim.x + blockIdx.x;
int thread_id = block_id * blockDim.x + threadIdx.x;
int i = thread_id / img_width, j = thread_id % img_width;
if (thread_id < img_height * img_width) {
int gauss_index = 0;
float rgb[3] = {0, 0, 0};
for (int row = i - 3 * S; row <= i + 3 * S; ++row) {
for (int col = j - 3 * S; col <= j + 3 * S; ++col) {
int src_img_value;
float gauss_kernel_value = gauss_kernel[gauss_index++];
//
if (row >= 0 && row < img_height && col >= 0 && col < img_width) {
src_img_value = src_img[row * img_width + col];
//
} else {
int reflect_row = i + (i - row);
int reflect_col = j + (j - col);
src_img_value = src_img[reflect_row * img_width + reflect_col];
}
for (int k = 2; k >= 0; --k) {
rgb[k] += gauss_kernel_value * (src_img_value & 255);
src_img_value >>= 8;
}
}
}
for (int i = 0; i < 3; ++i) {
rgb[i] = rgb[i] < 0 ? 0 : (rgb[i] > 255 ? 255 : rgb[i]);
}
res_img[thread_id] = (int(rgb[0]) << 16) + (int(rgb[1]) << 8) + rgb[2];
}
}
/*
Return::
@Int array: the result image pixel array after blur
*/
int *imgBlur(int *src_img, int img_height, int img_width) {
int S = 2;
calculateGaussKernel(S);
int img_size = img_height * img_width;
int img_size_bytes = img_size * sizeof(int);
int *h_res_img = (int *)malloc(img_size_bytes);
int *d_src_img = NULL, *d_res_img = NULL;
hipMalloc((void **)&d_src_img, img_size_bytes);
hipMalloc((void **)&d_res_img, img_size_bytes);
dim3 block(1024, 1, 1), grid(1, 1, 1);
if (img_size < 1024) {
block.x = img_size;
} else {
grid.x = updiv(img_size, 1024);
}
hipMemcpy(d_src_img, src_img, img_size_bytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_blur), dim3(grid), dim3(block), 0, 0, S, img_height, img_width, d_res_img, d_src_img);
hipMemcpy(h_res_img, d_res_img, img_size_bytes, hipMemcpyDeviceToHost);
hipFree(d_res_img);
hipFree(d_src_img);
return h_res_img;
}
/*
S
*/
void calculateGaussKernel(int S) {
int n = 6 * S + 1;
int size = sizeof(float) * n * n;
float *h_gauss_kernel = (float *)malloc(size);
float sum = 0;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
int x = i - 3 * S, y = j - 3 * S;
h_gauss_kernel[i * n + j] =
1 / (S * sqrt(2 * PI)) * exp(-1.0 * (x * x + y * y) / (2 * S * S));
sum += h_gauss_kernel[i * n + j];
}
}
//
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
h_gauss_kernel[i * n + j] /= sum;
}
}
// cuda constant
hipMemcpyToSymbol(gauss_kernel, h_gauss_kernel, size);
}
| 79d8466a1d355477bcc1cd5c30c6733d13ab7ef0.cu | /*
* @Author: X Wang, Y xiao, Ch Yang, G Ye
* @Date: 2019-06-17 01:03:01
* @Last Modified by: X Wang, Y Xiao, Ch Yang, G Ye
* @Last Modified time: 2019-06-17 10:51:30
* @file description:
blur image using gauss kernel
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "blur.h"
#include "common.h"
#define PI 3.141593
// The kernel of gauss using bluring img;
// And the size of gauss kernel is in [1, 8];
__constant__ float gauss_kernel[2500];
/*
图像模糊处理,每个线程负责一个像素点,S为高斯核大小
*/
__global__ void kernel_blur(int S, int img_height, int img_width, int *res_img,
const int *__restrict__ src_img) {
int block_id = blockIdx.y * gridDim.x + blockIdx.x;
int thread_id = block_id * blockDim.x + threadIdx.x;
int i = thread_id / img_width, j = thread_id % img_width;
if (thread_id < img_height * img_width) {
int gauss_index = 0;
float rgb[3] = {0, 0, 0};
for (int row = i - 3 * S; row <= i + 3 * S; ++row) {
for (int col = j - 3 * S; col <= j + 3 * S; ++col) {
int src_img_value;
float gauss_kernel_value = gauss_kernel[gauss_index++];
// 如果该点没越界
if (row >= 0 && row < img_height && col >= 0 && col < img_width) {
src_img_value = src_img[row * img_width + col];
// 如果该点越界,取该点与中心对称的点
} else {
int reflect_row = i + (i - row);
int reflect_col = j + (j - col);
src_img_value = src_img[reflect_row * img_width + reflect_col];
}
for (int k = 2; k >= 0; --k) {
rgb[k] += gauss_kernel_value * (src_img_value & 255);
src_img_value >>= 8;
}
}
}
for (int i = 0; i < 3; ++i) {
rgb[i] = rgb[i] < 0 ? 0 : (rgb[i] > 255 ? 255 : rgb[i]);
}
res_img[thread_id] = (int(rgb[0]) << 16) + (int(rgb[1]) << 8) + rgb[2];
}
}
/*
使用高斯核进行图像模糊处理
Return::
@Int array: the result image pixel array after blur
*/
int *imgBlur(int *src_img, int img_height, int img_width) {
int S = 2;
calculateGaussKernel(S);
int img_size = img_height * img_width;
int img_size_bytes = img_size * sizeof(int);
int *h_res_img = (int *)malloc(img_size_bytes);
int *d_src_img = NULL, *d_res_img = NULL;
cudaMalloc((void **)&d_src_img, img_size_bytes);
cudaMalloc((void **)&d_res_img, img_size_bytes);
dim3 block(1024, 1, 1), grid(1, 1, 1);
if (img_size < 1024) {
block.x = img_size;
} else {
grid.x = updiv(img_size, 1024);
}
cudaMemcpy(d_src_img, src_img, img_size_bytes, cudaMemcpyHostToDevice);
kernel_blur<<<grid, block>>>(S, img_height, img_width, d_res_img, d_src_img);
cudaMemcpy(h_res_img, d_res_img, img_size_bytes, cudaMemcpyDeviceToHost);
cudaFree(d_res_img);
cudaFree(d_src_img);
return h_res_img;
}
/*
计算高斯核,S为高斯核大小
*/
void calculateGaussKernel(int S) {
int n = 6 * S + 1;
int size = sizeof(float) * n * n;
float *h_gauss_kernel = (float *)malloc(size);
float sum = 0;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
int x = i - 3 * S, y = j - 3 * S;
h_gauss_kernel[i * n + j] =
1 / (S * sqrt(2 * PI)) * exp(-1.0 * (x * x + y * y) / (2 * S * S));
sum += h_gauss_kernel[i * n + j];
}
}
// 对高斯核进行归一化
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
h_gauss_kernel[i * n + j] /= sum;
}
}
// 将计算的结果拷贝到cuda constant内存里
cudaMemcpyToSymbol(gauss_kernel, h_gauss_kernel, size);
}
|
930a6193dd6c179e981be91c93fe3eca38e59bf4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void convolution(float* inputImage , float* outputImage, float* filter, int imageWidth, int imageHeight, int filterWidth) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int i = blockIdx.y * blockDim.y + threadIdx.y; /*raccoon: y = row*/
int j = blockIdx.x * blockDim.x + threadIdx.x; /*raccoon: x = column*/
float sum = 0;
int halffilterSize = filterWidth / 2;
int k, l;
for(k = -halffilterSize; k <= halffilterSize; k++){
for(l = halffilterSize; l <=halffilterSize; l++){
if(i + k >= 0 && i + k < imageHeight && j + l >= 0 && j + l < imageWidth){
sum += inputImage[(i + k) * imageWidth + j + l] *
filter[(k + halffilterSize) * filterWidth + l + halffilterSize];
}
}
}
outputImage[i * imageWidth + j] = sum;
}
// Host front-end function that allocates the memory and launches the GPU kernel
//raccoon:img = output
//raccoon:resX = width
//raccoon:resY = height
void hostFE (float* inputImage, float* outputImage, float* filter, int imageWidth, int imageHeight, int filterWidth)
{
/*------------------raccoon------------------------*/
size_t imageSize = imageHeight * imageWidth * sizeof(float);
size_t filterSize = filterWidth * filterWidth * sizeof(float);
float* source_image;
float* output_image;
float* kernel_filter;
hipMalloc(&source_image, imageSize);
hipMalloc(&output_image, imageSize);
hipMalloc(&kernel_filter, filterSize);
hipMemcpy(source_image, inputImage, imageSize, hipMemcpyHostToDevice);
hipMemcpy(output_image, outputImage, imageSize, hipMemcpyHostToDevice);
hipMemcpy(kernel_filter, filter, filterSize, hipMemcpyHostToDevice);
dim3 dimBlock(8, 8);
dim3 dimGrid(imageWidth / dimBlock.x, imageHeight / dimBlock.y);
hipLaunchKernelGGL(( convolution) , dim3(dimGrid), dim3(dimBlock), 0, 0, source_image, output_image, kernel_filter, imageWidth, imageHeight, filterWidth);
// hipMemcpy(, result, size, hipMemcpyDeviceToHost);
// hipFree(result);
}
| 930a6193dd6c179e981be91c93fe3eca38e59bf4.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void convolution(float* inputImage , float* outputImage, float* filter, int imageWidth, int imageHeight, int filterWidth) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int i = blockIdx.y * blockDim.y + threadIdx.y; /*raccoon: y = row*/
int j = blockIdx.x * blockDim.x + threadIdx.x; /*raccoon: x = column*/
float sum = 0;
int halffilterSize = filterWidth / 2;
int k, l;
for(k = -halffilterSize; k <= halffilterSize; k++){
for(l = halffilterSize; l <=halffilterSize; l++){
if(i + k >= 0 && i + k < imageHeight && j + l >= 0 && j + l < imageWidth){
sum += inputImage[(i + k) * imageWidth + j + l] *
filter[(k + halffilterSize) * filterWidth + l + halffilterSize];
}
}
}
outputImage[i * imageWidth + j] = sum;
}
// Host front-end function that allocates the memory and launches the GPU kernel
//raccoon:img = output
//raccoon:resX = width
//raccoon:resY = height
void hostFE (float* inputImage, float* outputImage, float* filter, int imageWidth, int imageHeight, int filterWidth)
{
/*------------------raccoon------------------------*/
size_t imageSize = imageHeight * imageWidth * sizeof(float);
size_t filterSize = filterWidth * filterWidth * sizeof(float);
float* source_image;
float* output_image;
float* kernel_filter;
cudaMalloc(&source_image, imageSize);
cudaMalloc(&output_image, imageSize);
cudaMalloc(&kernel_filter, filterSize);
cudaMemcpy(source_image, inputImage, imageSize, cudaMemcpyHostToDevice);
cudaMemcpy(output_image, outputImage, imageSize, cudaMemcpyHostToDevice);
cudaMemcpy(kernel_filter, filter, filterSize, cudaMemcpyHostToDevice);
dim3 dimBlock(8, 8);
dim3 dimGrid(imageWidth / dimBlock.x, imageHeight / dimBlock.y);
convolution <<<dimGrid, dimBlock>>>(source_image, output_image, kernel_filter, imageWidth, imageHeight, filterWidth);
// cudaMemcpy(, result, size, cudaMemcpyDeviceToHost);
// cudaFree(result);
}
|
e7e58c8bc761d4745c8e933da295625ce54a4525.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_buffer.hpp>
#include <type_traits>
enum DispatchingType { HOST_DISPATCHING, DEVICE_DISPATCHING, NO_DISPATCHING };
enum FunctorType { BANDWIDTH_BOUND, COMPUTE_BOUND };
template <class NotFloat, FunctorType ft, class DisableNotFloat = void>
struct Functor {
static __device__ NotFloat f(NotFloat x) { return x; }
};
template <class Float, FunctorType ft>
struct Functor<Float, ft, std::enable_if_t<std::is_floating_point_v<Float>>> {
static __device__ Float f(Float x)
{
if (ft == BANDWIDTH_BOUND) {
return x + static_cast<Float>(1) - static_cast<Float>(1);
} else {
for (int i = 0; i < 1000; i++) {
x = (x * x + static_cast<Float>(1)) - x * x - static_cast<Float>(1);
}
return x;
}
}
};
constexpr int block_size = 256;
// This is for NO_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void no_dispatching_kernel(T** A, cudf::size_type n_rows, cudf::size_type n_cols)
{
using F = Functor<T, functor_type>;
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < n_rows) {
for (int c = 0; c < n_cols; c++) {
A[c][index] = F::f(A[c][index]);
}
index += blockDim.x * gridDim.x;
}
}
// This is for HOST_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void host_dispatching_kernel(cudf::mutable_column_device_view source_column)
{
using F = Functor<T, functor_type>;
T* A = source_column.data<T>();
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < source_column.size()) {
A[index] = F::f(A[index]);
index += blockDim.x * gridDim.x;
}
}
template <FunctorType functor_type>
struct ColumnHandle {
template <typename ColumnType, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<ColumnType>())>
void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
{
cudf::detail::grid_1d grid_config{source_column.size(), block_size};
int grid_size = grid_config.num_blocks;
// Launch the kernel.
hipLaunchKernelGGL(( host_dispatching_kernel<functor_type, ColumnType>), dim3(grid_size), dim3(block_size), 0, 0, source_column);
}
template <typename ColumnType, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<ColumnType>())>
void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
{
CUDF_FAIL("Invalid type to benchmark.");
}
};
// The following is for DEVICE_DISPATCHING:
// The dispatching is done on device. The loop loops over
// each row (across different columns). Type is dispatched each time
// a column is visited so the total number of dispatching is
// n_rows * n_cols.
// Device-side dispatch target for DEVICE_DISPATCHING: applies the functor to
// a single element (row `index`) of one column, with the element type T
// resolved on the device by cudf::type_dispatcher.
template <FunctorType functor_type>
struct RowHandle {
  template <typename T, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>())>
  __device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
  {
    using F = Functor<T, functor_type>;
    source.data<T>()[index] = F::f(source.data<T>()[index]);
  }
  // Fallback for types without a device-compatible representation.
  template <typename T, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<T>())>
  __device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
  {
    CUDF_UNREACHABLE("Unsupported type.");
  }
};
// This is for DEVICE_DISPATCHING
// DEVICE_DISPATCHING path: a grid-stride loop over rows; for each row the type
// of every column is dispatched on the device, so the total number of
// dispatches is n_rows * n_cols (the cost this benchmark measures).
template <FunctorType functor_type>
__global__ void device_dispatching_kernel(cudf::mutable_table_device_view source)
{
  const cudf::size_type n_rows = source.num_rows();
  cudf::size_type index = threadIdx.x + blockIdx.x * blockDim.x;
  while (index < n_rows) {
    for (cudf::size_type i = 0; i < source.num_columns(); i++) {
      // Per-row, per-column device-side type dispatch.
      cudf::type_dispatcher(
        source.column(i).type(), RowHandle<functor_type>{}, source.column(i), index);
    }
    index += blockDim.x * gridDim.x;
  }  // while
}
// Launches the benchmark work with one of three dispatch strategies:
//  - HOST_DISPATCHING:   one host-side type dispatch (and kernel launch) per column;
//  - DEVICE_DISPATCHING: a single kernel that type-dispatches per row per column;
//  - NO_DISPATCHING:     a single kernel over raw typed pointers (d_ptr), no dispatch.
// d_ptr is only consumed by the NO_DISPATCHING path; work_per_thread is
// forwarded to the per-column handler but unused by the kernels here.
template <FunctorType functor_type, DispatchingType dispatching_type, class T>
void launch_kernel(cudf::mutable_table_view input, T** d_ptr, int work_per_thread)
{
  const cudf::size_type n_rows = input.num_rows();
  const cudf::size_type n_cols = input.num_columns();
  // One thread per row, independent of the number of columns.
  cudf::detail::grid_1d grid_config{n_rows, block_size};
  int grid_size = grid_config.num_blocks;
  if (dispatching_type == HOST_DISPATCHING) {
    // std::vector<cudf::util::cuda::scoped_stream> v_stream(n_cols);
    for (int c = 0; c < n_cols; c++) {
      auto d_column = cudf::mutable_column_device_view::create(input.column(c));
      cudf::type_dispatcher(
        d_column->type(), ColumnHandle<functor_type>{}, *d_column, work_per_thread);
    }
  } else if (dispatching_type == DEVICE_DISPATCHING) {
    auto d_table_view = cudf::mutable_table_device_view::create(input);
    auto f = device_dispatching_kernel<functor_type>;
    // Launch the kernel
    hipLaunchKernelGGL(( f), dim3(grid_size), dim3(block_size), 0, 0, *d_table_view);
  } else if (dispatching_type == NO_DISPATCHING) {
    auto f = no_dispatching_kernel<functor_type, T>;
    // Launch the kernel
    hipLaunchKernelGGL(( f), dim3(grid_size), dim3(block_size), 0, 0, d_ptr, n_rows, n_cols);
  }
}
// Google Benchmark body: builds n_cols sequence columns of source_size rows,
// then repeatedly applies the element-wise functor via the selected dispatch
// strategy, timing each iteration with CUDA events (cuda_event_timer).
// state.range: {0: n_cols, 1: rows per column, 2: work_per_thread (unused)}.
template <class TypeParam, FunctorType functor_type, DispatchingType dispatching_type>
void type_dispatcher_benchmark(::benchmark::State& state)
{
  const auto n_cols = static_cast<cudf::size_type>(state.range(0));
  const auto source_size = static_cast<cudf::size_type>(state.range(1));
  const auto work_per_thread = static_cast<cudf::size_type>(state.range(2));
  auto init = cudf::make_fixed_width_scalar<TypeParam>(static_cast<TypeParam>(0));
  std::vector<std::unique_ptr<cudf::column>> source_column_wrappers;
  std::vector<cudf::mutable_column_view> source_columns;
  for (int i = 0; i < n_cols; ++i) {
    source_column_wrappers.push_back(cudf::sequence(source_size, *init));
    source_columns.push_back(*source_column_wrappers[i]);
  }
  cudf::mutable_table_view source_table{source_columns};
  // For no dispatching: one raw device buffer per column; h_vec keeps the
  // allocations alive while h_vec_p records their device pointers.
  std::vector<rmm::device_buffer> h_vec(n_cols);
  std::vector<TypeParam*> h_vec_p(n_cols);
  std::transform(h_vec.begin(), h_vec.end(), h_vec_p.begin(), [source_size](auto& col) {
    // Allocate the column's buffer and hand back its device pointer.
    col.resize(source_size * sizeof(TypeParam), cudf::get_default_stream());
    return static_cast<TypeParam*>(col.data());
  });
  // Device-side array of the per-column pointers, consumed by the kernel.
  rmm::device_uvector<TypeParam*> d_vec(n_cols, cudf::get_default_stream());
  if (dispatching_type == NO_DISPATCHING) {
    CUDF_CUDA_TRY(hipMemcpy(
      d_vec.data(), h_vec_p.data(), sizeof(TypeParam*) * n_cols, hipMemcpyHostToDevice));
  }
  // Warm up
  launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
  CUDF_CUDA_TRY(hipDeviceSynchronize());
  for (auto _ : state) {
    cuda_event_timer raii(state, true);  // flush_l2_cache = true, stream = 0
    launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
  }
  // Each element is read once and written once per iteration, hence factor 2.
  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * source_size * n_cols * 2 *
                          sizeof(TypeParam));
}
class TypeDispatcher : public cudf::benchmark {
};
#define TBM_BENCHMARK_DEFINE(name, TypeParam, functor_type, dispatching_type) \
BENCHMARK_DEFINE_F(TypeDispatcher, name)(::benchmark::State & state) \
{ \
type_dispatcher_benchmark<TypeParam, functor_type, dispatching_type>(state); \
} \
BENCHMARK_REGISTER_F(TypeDispatcher, name) \
->RangeMultiplier(2) \
->Ranges({{1, 8}, {1 << 10, 1 << 26}, {1, 1}}) \
->UseManualTime();
TBM_BENCHMARK_DEFINE(fp64_bandwidth_host, double, BANDWIDTH_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_device, double, BANDWIDTH_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_no, double, BANDWIDTH_BOUND, NO_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_host, double, COMPUTE_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_device, double, COMPUTE_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_no, double, COMPUTE_BOUND, NO_DISPATCHING);
| e7e58c8bc761d4745c8e933da295625ce54a4525.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_buffer.hpp>
#include <type_traits>
enum DispatchingType { HOST_DISPATCHING, DEVICE_DISPATCHING, NO_DISPATCHING };
enum FunctorType { BANDWIDTH_BOUND, COMPUTE_BOUND };
// Primary template: identity for non-floating-point element types, so such
// columns exercise only memory traffic, not arithmetic.
template <class NotFloat, FunctorType ft, class DisableNotFloat = void>
struct Functor {
  static __device__ NotFloat f(NotFloat x) { return x; }
};
// Floating-point specialization. BANDWIDTH_BOUND performs trivial arithmetic
// (memory-bound); COMPUTE_BOUND iterates 1000 times so arithmetic dominates.
// Both forms are constructed to leave x (approximately) unchanged.
template <class Float, FunctorType ft>
struct Functor<Float, ft, std::enable_if_t<std::is_floating_point_v<Float>>> {
  static __device__ Float f(Float x)
  {
    if (ft == BANDWIDTH_BOUND) {
      return x + static_cast<Float>(1) - static_cast<Float>(1);
    } else {
      for (int i = 0; i < 1000; i++) {
        x = (x * x + static_cast<Float>(1)) - x * x - static_cast<Float>(1);
      }
      return x;
    }
  }
};
constexpr int block_size = 256;
// This is for NO_DISPATCHING
// NO_DISPATCHING path: operates directly on an array of raw typed column
// pointers (A[c] points at column c's data), so no type dispatch happens at
// all. Grid-stride loop over rows; each thread touches every column.
template <FunctorType functor_type, class T>
__global__ void no_dispatching_kernel(T** A, cudf::size_type n_rows, cudf::size_type n_cols)
{
  using F = Functor<T, functor_type>;
  cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
  while (index < n_rows) {
    for (int c = 0; c < n_cols; c++) {
      A[c][index] = F::f(A[c][index]);
    }
    index += blockDim.x * gridDim.x;
  }
}
// This is for HOST_DISPATCHING
// HOST_DISPATCHING path: the element type T is resolved on the host (one
// cudf::type_dispatcher call per column) before launch, so the kernel itself
// is fully typed. Grid-stride loop applying Functor<T, functor_type>::f in
// place to every element of a single column.
template <FunctorType functor_type, class T>
__global__ void host_dispatching_kernel(cudf::mutable_column_device_view source_column)
{
  using F = Functor<T, functor_type>;
  T* A = source_column.data<T>();
  // Flat global thread index; the grid-stride below lets any launch
  // configuration cover all rows.
  cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
  while (index < source_column.size()) {
    A[index] = F::f(A[index]);
    index += blockDim.x * gridDim.x;
  }
}
// Host-side dispatch target for HOST_DISPATCHING: launches the typed kernel
// over one column. The SFINAE pair accepts only element types whose device
// representation matches the host layout; all other types are rejected.
// work_per_thread is accepted for interface symmetry but unused here.
template <FunctorType functor_type>
struct ColumnHandle {
  template <typename ColumnType, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<ColumnType>())>
  void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
  {
    // One thread per row, rounded up into blocks of block_size.
    cudf::detail::grid_1d grid_config{source_column.size(), block_size};
    int grid_size = grid_config.num_blocks;
    // Launch the kernel.
    host_dispatching_kernel<functor_type, ColumnType><<<grid_size, block_size>>>(source_column);
  }
  // Fallback for non-rep-layout-compatible types: not benchmarkable.
  template <typename ColumnType, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<ColumnType>())>
  void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
  {
    CUDF_FAIL("Invalid type to benchmark.");
  }
};
// The following is for DEVICE_DISPATCHING:
// The dispatching is done on device. The loop loops over
// each row (across different columns). Type is dispatched each time
// a column is visited so the total number of dispatching is
// n_rows * n_cols.
// Device-side dispatch target for DEVICE_DISPATCHING: applies the functor to
// a single element (row `index`) of one column, with the element type T
// resolved on the device by cudf::type_dispatcher.
template <FunctorType functor_type>
struct RowHandle {
  template <typename T, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>())>
  __device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
  {
    using F = Functor<T, functor_type>;
    source.data<T>()[index] = F::f(source.data<T>()[index]);
  }
  // Fallback for types without a device-compatible representation.
  template <typename T, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<T>())>
  __device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
  {
    CUDF_UNREACHABLE("Unsupported type.");
  }
};
// This is for DEVICE_DISPATCHING
// DEVICE_DISPATCHING path: a grid-stride loop over rows; for each row the type
// of every column is dispatched on the device, so the total number of
// dispatches is n_rows * n_cols (the cost this benchmark measures).
template <FunctorType functor_type>
__global__ void device_dispatching_kernel(cudf::mutable_table_device_view source)
{
  const cudf::size_type n_rows = source.num_rows();
  cudf::size_type index = threadIdx.x + blockIdx.x * blockDim.x;
  while (index < n_rows) {
    for (cudf::size_type i = 0; i < source.num_columns(); i++) {
      // Per-row, per-column device-side type dispatch.
      cudf::type_dispatcher(
        source.column(i).type(), RowHandle<functor_type>{}, source.column(i), index);
    }
    index += blockDim.x * gridDim.x;
  }  // while
}
// Launches the benchmark work with one of three dispatch strategies:
//  - HOST_DISPATCHING:   one host-side type dispatch (and kernel launch) per column;
//  - DEVICE_DISPATCHING: a single kernel that type-dispatches per row per column;
//  - NO_DISPATCHING:     a single kernel over raw typed pointers (d_ptr), no dispatch.
// d_ptr is only consumed by the NO_DISPATCHING path; work_per_thread is
// forwarded to the per-column handler but unused by the kernels here.
template <FunctorType functor_type, DispatchingType dispatching_type, class T>
void launch_kernel(cudf::mutable_table_view input, T** d_ptr, int work_per_thread)
{
  const cudf::size_type n_rows = input.num_rows();
  const cudf::size_type n_cols = input.num_columns();
  // One thread per row, independent of the number of columns.
  cudf::detail::grid_1d grid_config{n_rows, block_size};
  int grid_size = grid_config.num_blocks;
  if (dispatching_type == HOST_DISPATCHING) {
    // std::vector<cudf::util::cuda::scoped_stream> v_stream(n_cols);
    for (int c = 0; c < n_cols; c++) {
      auto d_column = cudf::mutable_column_device_view::create(input.column(c));
      cudf::type_dispatcher(
        d_column->type(), ColumnHandle<functor_type>{}, *d_column, work_per_thread);
    }
  } else if (dispatching_type == DEVICE_DISPATCHING) {
    auto d_table_view = cudf::mutable_table_device_view::create(input);
    auto f = device_dispatching_kernel<functor_type>;
    // Launch the kernel
    f<<<grid_size, block_size>>>(*d_table_view);
  } else if (dispatching_type == NO_DISPATCHING) {
    auto f = no_dispatching_kernel<functor_type, T>;
    // Launch the kernel
    f<<<grid_size, block_size>>>(d_ptr, n_rows, n_cols);
  }
}
// Google Benchmark body: builds n_cols sequence columns of source_size rows,
// then repeatedly applies the element-wise functor via the selected dispatch
// strategy, timing each iteration with CUDA events (cuda_event_timer).
// state.range: {0: n_cols, 1: rows per column, 2: work_per_thread (unused)}.
template <class TypeParam, FunctorType functor_type, DispatchingType dispatching_type>
void type_dispatcher_benchmark(::benchmark::State& state)
{
  const auto n_cols = static_cast<cudf::size_type>(state.range(0));
  const auto source_size = static_cast<cudf::size_type>(state.range(1));
  const auto work_per_thread = static_cast<cudf::size_type>(state.range(2));
  auto init = cudf::make_fixed_width_scalar<TypeParam>(static_cast<TypeParam>(0));
  std::vector<std::unique_ptr<cudf::column>> source_column_wrappers;
  std::vector<cudf::mutable_column_view> source_columns;
  for (int i = 0; i < n_cols; ++i) {
    source_column_wrappers.push_back(cudf::sequence(source_size, *init));
    source_columns.push_back(*source_column_wrappers[i]);
  }
  cudf::mutable_table_view source_table{source_columns};
  // For no dispatching: one raw device buffer per column; h_vec keeps the
  // allocations alive while h_vec_p records their device pointers.
  std::vector<rmm::device_buffer> h_vec(n_cols);
  std::vector<TypeParam*> h_vec_p(n_cols);
  std::transform(h_vec.begin(), h_vec.end(), h_vec_p.begin(), [source_size](auto& col) {
    // Allocate the column's buffer and hand back its device pointer.
    col.resize(source_size * sizeof(TypeParam), cudf::get_default_stream());
    return static_cast<TypeParam*>(col.data());
  });
  // Device-side array of the per-column pointers, consumed by the kernel.
  rmm::device_uvector<TypeParam*> d_vec(n_cols, cudf::get_default_stream());
  if (dispatching_type == NO_DISPATCHING) {
    CUDF_CUDA_TRY(cudaMemcpy(
      d_vec.data(), h_vec_p.data(), sizeof(TypeParam*) * n_cols, cudaMemcpyHostToDevice));
  }
  // Warm up
  launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
  CUDF_CUDA_TRY(cudaDeviceSynchronize());
  for (auto _ : state) {
    cuda_event_timer raii(state, true);  // flush_l2_cache = true, stream = 0
    launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
  }
  // Each element is read once and written once per iteration, hence factor 2.
  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * source_size * n_cols * 2 *
                          sizeof(TypeParam));
}
class TypeDispatcher : public cudf::benchmark {
};
#define TBM_BENCHMARK_DEFINE(name, TypeParam, functor_type, dispatching_type) \
BENCHMARK_DEFINE_F(TypeDispatcher, name)(::benchmark::State & state) \
{ \
type_dispatcher_benchmark<TypeParam, functor_type, dispatching_type>(state); \
} \
BENCHMARK_REGISTER_F(TypeDispatcher, name) \
->RangeMultiplier(2) \
->Ranges({{1, 8}, {1 << 10, 1 << 26}, {1, 1}}) \
->UseManualTime();
TBM_BENCHMARK_DEFINE(fp64_bandwidth_host, double, BANDWIDTH_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_device, double, BANDWIDTH_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_no, double, BANDWIDTH_BOUND, NO_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_host, double, COMPUTE_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_device, double, COMPUTE_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_no, double, COMPUTE_BOUND, NO_DISPATCHING);
|
b5197bfccee0dca57abb85ea5653a283885e8a1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define USE_LDG
#include "diffusion/diffusion_cuda_shared.h"
#include "common/cuda_util.h"
namespace diffusion {
namespace cuda_shared5 {
#define GET(x) (x)
#define diffusion_backward() \
do { \
sb[ps] = s2; \
__syncthreads(); \
f2[p-xy] = cc * s2 \
+ cw * sb[ps+sb_w] + ce * sb[ps+sb_e] \
+ cs * sb[ps+sb_s] + cn * sb[ps+sb_n] + cb*s1 + ct*s3; \
} while (0)
// Temporal blocking
// z blocking
// the diagonal points are loaded by the vertical warp
__global__ void kernel3d(F1_DECL f1, F2_DECL f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
extern __shared__ REAL sb[];
const int sbx = BLOCK_X+4;
const int tidx = threadIdx.x % BLOCK_X;
const int tidy = threadIdx.x / BLOCK_X - 1;
int i = BLOCK_X * blockIdx.x + tidx;
int j = BLOCK_Y * blockIdx.y + tidy;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j == ny) ? ny - 1 : j; // min(j, ny-1)
int xy = nx * ny;
const int block_z = nz / gridDim.z;
int k = (blockIdx.z == 0) ? 0:
block_z * blockIdx.z - 1;
const int k_end = (blockIdx.z == gridDim.z-1) ? nz:
block_z * (blockIdx.z + 1) + 1;
int p = i + j * nx + k *xy;
int ps = tidx+2 + (tidy+1) * sbx;
if (tidy == -1) {
int s = (j == 0) ? 0 : -nx;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
} else if (tidy == BLOCK_Y) {
int n = (j == ny-1) ? 0 : nx;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
} else if (tidy >= 0 && tidy < BLOCK_Y) {
int sb_s = (j == 0) ? 0: -sbx;
int sb_n = (j == ny-1) ? 0: sbx;
int sb_w = (i == 0) ? 0: -1;
int sb_e = (i == nx-1) ? 0: 1;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s1, s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
SHIFT3(s1, s2, s3);
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
SHIFT3(s1, s2, s3);
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
__syncthreads();
diffusion_backward();
__syncthreads();
p += xy;
}
if (k == nz) {
SHIFT3(s1, s2, s3);
diffusion_backward();
}
} else {
// horizontal halo
int xoffset = (tidx & 1) + ((tidx & 2) >> 1) * (BLOCK_X + 2);
int yoffset = tidx >> 2;
yoffset = (yoffset >= (BLOCK_Y + 2)) ? BLOCK_Y+1 : yoffset;
i = BLOCK_X * blockIdx.x - 2 + xoffset;
i = (i < 0) ? 0 : i;
i = (i >= nx) ? nx - 1 : i;
j = BLOCK_Y * blockIdx.y -1 + yoffset;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j >= ny) ? ny - 1 : j; // min(j, ny-1)
int s = (yoffset == 0) ? 0 : -sbx;
int n = (yoffset == BLOCK_Y+1) ? 0 : sbx;
int w = (xoffset == 0) ? 0 : -1;
int e = (xoffset == sbx-1) ? 0 : 1;
p = i + j * nx + k * xy;
ps = xoffset + yoffset * sbx;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = GET(f1[p+xy]);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
}
for (; k < k_end-1; ++k) {
SHIFT3(t1, t2, t3);
t3 = GET(f1[p+xy]);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
++k;
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
}
return;
}
} // namespace cuda_shared5
// Copies the host grid to the device, runs `count` diffusion time steps via
// the shared-memory kernel, and copies the result back. Each launch advances
// two time steps (temporal blocking) — hence the loop stride of 2, the single
// buffer swap per launch, and the requirement that `count` be even.
void DiffusionCUDAShared5::RunKernel(int count) {
  size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
  FORCE_CHECK_CUDA(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice));
  assert(count % 2 == 0);
  //dim3 block_dim(BLOCK_X * BLOCK_Y + 32); // + 1 warp
  // BLOCK_X*(BLOCK_Y+2) threads cover the tile plus top/bottom halo rows;
  // the extra 32 threads load the horizontal halo columns (see kernel3d).
  dim3 block_dim(BLOCK_X * (BLOCK_Y+2) + 32);
  dim3 grid_dim(nx_ / BLOCK_X, ny_ / BLOCK_Y, grid_z_);
  CHECK_CUDA(hipEventRecord(ev1_));
  for (int i = 0; i < count; i+=2) {
    // Dynamic shared memory: one (BLOCK_X+4) x (BLOCK_Y+2) float tile.
    hipLaunchKernelGGL(( cuda_shared5::kernel3d), dim3(grid_dim), dim3(block_dim),
        (BLOCK_X+4)*(BLOCK_Y+2)*sizeof(float), 0,
        f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
    // Ping-pong input/output buffers for the next launch.
    REAL *t = f1_d_;
    f1_d_ = f2_d_;
    f2_d_ = t;
  }
  CHECK_CUDA(hipEventRecord(ev2_));
  FORCE_CHECK_CUDA(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost));
  return;
}
// One-time configuration: after the base-class setup, bias the on-chip
// memory split toward shared memory, which kernel3d uses heavily for its
// (BLOCK_X+4) x (BLOCK_Y+2) tile.
void DiffusionCUDAShared5::Setup() {
  DiffusionCUDA::Setup();
  FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_shared5::kernel3d,
                                         hipFuncCachePreferShared));
}
}
| b5197bfccee0dca57abb85ea5653a283885e8a1e.cu | #define USE_LDG
#include "diffusion/diffusion_cuda_shared.h"
#include "common/cuda_util.h"
namespace diffusion {
namespace cuda_shared5 {
#define GET(x) (x)
#define diffusion_backward() \
do { \
sb[ps] = s2; \
__syncthreads(); \
f2[p-xy] = cc * s2 \
+ cw * sb[ps+sb_w] + ce * sb[ps+sb_e] \
+ cs * sb[ps+sb_s] + cn * sb[ps+sb_n] + cb*s1 + ct*s3; \
} while (0)
// Temporal blocking
// z blocking
// the diagonal points are loaded by the vertical warp
__global__ void kernel3d(F1_DECL f1, F2_DECL f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
extern __shared__ REAL sb[];
const int sbx = BLOCK_X+4;
const int tidx = threadIdx.x % BLOCK_X;
const int tidy = threadIdx.x / BLOCK_X - 1;
int i = BLOCK_X * blockIdx.x + tidx;
int j = BLOCK_Y * blockIdx.y + tidy;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j == ny) ? ny - 1 : j; // min(j, ny-1)
int xy = nx * ny;
const int block_z = nz / gridDim.z;
int k = (blockIdx.z == 0) ? 0:
block_z * blockIdx.z - 1;
const int k_end = (blockIdx.z == gridDim.z-1) ? nz:
block_z * (blockIdx.z + 1) + 1;
int p = i + j * nx + k *xy;
int ps = tidx+2 + (tidy+1) * sbx;
if (tidy == -1) {
int s = (j == 0) ? 0 : -nx;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * GET(f1[p+s])
+ cn * sb[ps+sbx] + cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
} else if (tidy == BLOCK_Y) {
int n = (j == ny-1) ? 0 : nx;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
p += xy;
__syncthreads();
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
} else if (tidy >= 0 && tidy < BLOCK_Y) {
int sb_s = (j == 0) ? 0: -sbx;
int sb_n = (j == ny-1) ? 0: sbx;
int sb_w = (i == 0) ? 0: -1;
int sb_e = (i == nx-1) ? 0: 1;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s1, s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
SHIFT3(s1, s2, s3);
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
p += xy;
__syncthreads();
++k;
}
for (; k < k_end; ++k) {
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
SHIFT3(s1, s2, s3);
__syncthreads();
s3 = cc * t2
+ cw * sb[ps-1] + ce * sb[ps+1]
+ cs * sb[ps-sbx]+ cn * sb[ps+sbx]
+ cb * t1 + ct * t3;
__syncthreads();
diffusion_backward();
__syncthreads();
p += xy;
}
if (k == nz) {
SHIFT3(s1, s2, s3);
diffusion_backward();
}
} else {
// horizontal halo
int xoffset = (tidx & 1) + ((tidx & 2) >> 1) * (BLOCK_X + 2);
int yoffset = tidx >> 2;
yoffset = (yoffset >= (BLOCK_Y + 2)) ? BLOCK_Y+1 : yoffset;
i = BLOCK_X * blockIdx.x - 2 + xoffset;
i = (i < 0) ? 0 : i;
i = (i >= nx) ? nx - 1 : i;
j = BLOCK_Y * blockIdx.y -1 + yoffset;
j = (j < 0) ? 0 : j; // max(j, 0)
j = (j >= ny) ? ny - 1 : j; // min(j, ny-1)
int s = (yoffset == 0) ? 0 : -sbx;
int n = (yoffset == BLOCK_Y+1) ? 0 : sbx;
int w = (xoffset == 0) ? 0 : -1;
int e = (xoffset == sbx-1) ? 0 : 1;
p = i + j * nx + k * xy;
ps = xoffset + yoffset * sbx;
float t2 = GET(f1[p]);
float t1 = (k == 0) ? t2 : GET(f1[p-xy]);
float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2;
sb[ps] = t2;
__syncthreads();
float s2, s3;
s2 = s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
if (k != 1) {
SHIFT3(t1, t2, t3);
t3 = GET(f1[p+xy]);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
p += xy;
++k;
}
for (; k < k_end-1; ++k) {
SHIFT3(t1, t2, t3);
t3 = GET(f1[p+xy]);
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
}
SHIFT3(t1, t2, t3);
t3 = (k < nz-1) ? GET(f1[p+xy]) : t3;
sb[ps] = t2;
s2 = s3;
__syncthreads();
s3 = cc * t2
+ cw * sb[ps+w] + ce * sb[ps+e]
+ cs * sb[ps+s] + cn * sb[ps+n]
+ cb*t1 + ct*t3;
__syncthreads();
sb[ps] = s2;
__syncthreads();
__syncthreads();
p += xy;
++k;
if (k == nz) {
s2 = s3;
sb[ps] = s2;
__syncthreads();
}
}
return;
}
} // namespace cuda_shared5
// Copies the host grid to the device, runs `count` diffusion time steps via
// the shared-memory kernel, and copies the result back. Each launch advances
// two time steps (temporal blocking) — hence the loop stride of 2, the single
// buffer swap per launch, and the requirement that `count` be even.
void DiffusionCUDAShared5::RunKernel(int count) {
  size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
  FORCE_CHECK_CUDA(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice));
  assert(count % 2 == 0);
  //dim3 block_dim(BLOCK_X * BLOCK_Y + 32); // + 1 warp
  // BLOCK_X*(BLOCK_Y+2) threads cover the tile plus top/bottom halo rows;
  // the extra 32 threads load the horizontal halo columns (see kernel3d).
  dim3 block_dim(BLOCK_X * (BLOCK_Y+2) + 32);
  dim3 grid_dim(nx_ / BLOCK_X, ny_ / BLOCK_Y, grid_z_);
  CHECK_CUDA(cudaEventRecord(ev1_));
  for (int i = 0; i < count; i+=2) {
    // Dynamic shared memory: one (BLOCK_X+4) x (BLOCK_Y+2) float tile.
    cuda_shared5::kernel3d<<<grid_dim, block_dim,
        (BLOCK_X+4)*(BLOCK_Y+2)*sizeof(float)>>>
        (f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
    // Ping-pong input/output buffers for the next launch.
    REAL *t = f1_d_;
    f1_d_ = f2_d_;
    f2_d_ = t;
  }
  CHECK_CUDA(cudaEventRecord(ev2_));
  FORCE_CHECK_CUDA(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost));
  return;
}
// One-time configuration: after the base-class setup, bias the on-chip
// memory split toward shared memory, which kernel3d uses heavily for its
// (BLOCK_X+4) x (BLOCK_Y+2) tile.
void DiffusionCUDAShared5::Setup() {
  DiffusionCUDA::Setup();
  FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_shared5::kernel3d,
                                          cudaFuncCachePreferShared));
}
}
|
8d17003751bd1e6f765748ea833a58ce716dd5bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "DS_timer.h"
#define NUM_DATA (1024 * 1024)
#define NUM_BIN (256)
#define NUM_THREADS_IN_BLOCK 1024
// Histogram kernel, naive version: one thread per input element, each thread
// atomically incrementing the global-memory bin selected by its sample value.
// Heavy contention on popular bins serializes the atomics.
// Assumes d_a values lie in [0, NUM_BIN).
__global__ void globalSync(float * d_a, int * d_b, int A_SIZE)
{
    int TID = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: the grid may cover more threads than elements.
    if(TID >= A_SIZE) return;
    atomicAdd(d_b + int(d_a[TID]), 1);
}
// Histogram kernel, shared-memory version: each block accumulates a private
// sub-histogram in shared memory, then the first NUM_BIN threads merge it
// into the global histogram — one global atomicAdd per bin per block instead
// of one per element. Requires blockDim.x >= NUM_BIN so every bin is both
// zeroed and flushed (holds here: 1024 threads vs 256 bins).
__global__ void optimizedSharedMemory(float * d_a, int * d_b, int A_SIZE)
{
    __shared__ int sh[NUM_BIN];
    int TID = blockIdx.x * blockDim.x + threadIdx.x;
    // Zero the block-local bins before any accumulation.
    if(threadIdx.x < NUM_BIN) sh[threadIdx.x] = 0;
    __syncthreads();
    if(TID < A_SIZE) atomicAdd(&sh[int(d_a[TID])], 1);
    __syncthreads();
    // Merge this block's partial histogram into the global result.
    if(threadIdx.x < NUM_BIN) atomicAdd(&d_b[threadIdx.x], sh[threadIdx.x]);
}
// Sequential CPU histogram reference: for every sample in a, increments the
// bin b[(int)sample]. Assumes samples fall in [0, number of bins).
void serial(float * a, int * b, int A_SIZE)
{
    const float *stop = a + A_SIZE;
    for (const float *cur = a; cur != stop; ++cur) {
        ++b[static_cast<int>(*cur)];
    }
}
// Validates histogram b against a reference recomputed from the input a.
// a: samples (values assumed in [0, B_SIZE)); b: histogram under test.
// Prints every mismatching bin and a final "match"/"not match" verdict.
void check(float * a, int * b, int A_SIZE, int B_SIZE)
{
    // Reference histogram computed on the host.
    int * temp = new int[B_SIZE]; memset(temp, 0, sizeof(int) * B_SIZE);
    for(int i=0 ; i<A_SIZE ; i++) {
        temp[int(a[i])]++;
    }
    bool success = true;
    for(int i=0 ; i<B_SIZE ; i++) {
        if(temp[i] != b[i]) {
            printf("index %d : result not match your value : %d, but original value : %d\n", i, b[i], temp[i]);
            success = false;
        }
    }
    if(success) {
        printf("match\n");
    }
    else {
        printf("not match\n");
    }
    // BUG FIX: the reference array was leaked on every call in the original.
    delete[] temp;
}
// Driver: builds a random input, computes the CPU reference histogram, then
// times the global-atomic and shared-memory GPU histogram kernels, checking
// each result against the CPU reference.
// NOTE(review): HIP API return codes are left unchecked, as in the original.
int main() {
    DS_timer timer(8);
    timer.initTimers();

    float *a, *d_a;
    int *b, *d_b;

    int A_SIZE = NUM_DATA;
    int B_SIZE = NUM_BIN;
    int A_MEM_SIZE = A_SIZE * sizeof(float);
    int B_MEM_SIZE = B_SIZE * sizeof(int);

    a = new float[A_SIZE]; memset(a, 0, A_MEM_SIZE);
    b = new int[B_SIZE]; memset(b, 0, B_MEM_SIZE);  // b already zeroed here

    // Samples uniformly distributed in [0, 256); bin index = truncated value.
    for (int i = 0; i < A_SIZE; i++) {
        a[i] = rand() / (float)RAND_MAX * 256.0f;
    }

    // serial (CPU reference)
    timer.onTimer(0);
    serial(a, b, A_SIZE);
    timer.offTimer(0);
    // serial result validation check
    check(a, b, A_SIZE, B_SIZE);

    // reset histogram before the GPU runs
    memset(b, 0, B_MEM_SIZE);

    // device global memory allocation
    hipMalloc(&d_a, A_MEM_SIZE);
    hipMalloc(&d_b, B_MEM_SIZE);
    // memory cpy Host to Device
    hipMemcpy(d_a, a, A_MEM_SIZE, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, B_MEM_SIZE, hipMemcpyHostToDevice);

    timer.onTimer(1);
    hipLaunchKernelGGL(( globalSync) , dim3(NUM_DATA / NUM_THREADS_IN_BLOCK), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_a, d_b, A_SIZE);
    hipDeviceSynchronize();
    timer.offTimer(1);
    hipMemcpy(b, d_b, B_MEM_SIZE, hipMemcpyDeviceToHost);
    check(a, b, A_SIZE, B_SIZE);

    // optimized shared memory version start
    memset(b, 0, B_MEM_SIZE);
    hipMemcpy(d_b, b, B_MEM_SIZE, hipMemcpyHostToDevice);
    timer.onTimer(2);
    hipLaunchKernelGGL(( optimizedSharedMemory) , dim3(NUM_DATA / NUM_THREADS_IN_BLOCK), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_a, d_b, A_SIZE);
    // BUG FIX: kernel launches are asynchronous; without this sync the timer
    // stopped immediately after the launch and measured launch overhead only
    // (the global-atomic path above already synchronized before stopping).
    hipDeviceSynchronize();
    timer.offTimer(2);
    hipMemcpy(b, d_b, B_MEM_SIZE, hipMemcpyDeviceToHost);
    check(a, b, A_SIZE, B_SIZE);

    // timer display
    timer.setTimerName(0, "serial");
    timer.setTimerName(1, "atomic");
    timer.setTimerName(2, "shared");
    timer.printTimer();

    hipFree(d_a); hipFree(d_b);
    delete[] a;
    delete[] b;
    return 0;
}
| 8d17003751bd1e6f765748ea833a58ce716dd5bd.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "DS_timer.h"
#define NUM_DATA (1024 * 1024)
#define NUM_BIN (256)
#define NUM_THREADS_IN_BLOCK 1024
// Histogram kernel, naive version: one thread per input element, each thread
// atomically incrementing the global-memory bin selected by its sample value.
// Heavy contention on popular bins serializes the atomics.
// Assumes d_a values lie in [0, NUM_BIN).
__global__ void globalSync(float * d_a, int * d_b, int A_SIZE)
{
    int TID = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: the grid may cover more threads than elements.
    if(TID >= A_SIZE) return;
    atomicAdd(d_b + int(d_a[TID]), 1);
}
// Histogram kernel, shared-memory version: each block accumulates a private
// sub-histogram in shared memory, then the first NUM_BIN threads merge it
// into the global histogram — one global atomicAdd per bin per block instead
// of one per element. Requires blockDim.x >= NUM_BIN so every bin is both
// zeroed and flushed (holds here: 1024 threads vs 256 bins).
__global__ void optimizedSharedMemory(float * d_a, int * d_b, int A_SIZE)
{
    __shared__ int sh[NUM_BIN];
    int TID = blockIdx.x * blockDim.x + threadIdx.x;
    // Zero the block-local bins before any accumulation.
    if(threadIdx.x < NUM_BIN) sh[threadIdx.x] = 0;
    __syncthreads();
    if(TID < A_SIZE) atomicAdd(&sh[int(d_a[TID])], 1);
    __syncthreads();
    // Merge this block's partial histogram into the global result.
    if(threadIdx.x < NUM_BIN) atomicAdd(&d_b[threadIdx.x], sh[threadIdx.x]);
}
// Sequential CPU histogram reference: for every sample in a, increments the
// bin b[(int)sample]. Assumes samples fall in [0, number of bins).
void serial(float * a, int * b, int A_SIZE)
{
    const float *stop = a + A_SIZE;
    for (const float *cur = a; cur != stop; ++cur) {
        ++b[static_cast<int>(*cur)];
    }
}
// Validates histogram b against a reference recomputed from the input a.
// a: samples (values assumed in [0, B_SIZE)); b: histogram under test.
// Prints every mismatching bin and a final "match"/"not match" verdict.
void check(float * a, int * b, int A_SIZE, int B_SIZE)
{
    // Reference histogram computed on the host.
    int * temp = new int[B_SIZE]; memset(temp, 0, sizeof(int) * B_SIZE);
    for(int i=0 ; i<A_SIZE ; i++) {
        temp[int(a[i])]++;
    }
    bool success = true;
    for(int i=0 ; i<B_SIZE ; i++) {
        if(temp[i] != b[i]) {
            printf("index %d : result not match your value : %d, but original value : %d\n", i, b[i], temp[i]);
            success = false;
        }
    }
    if(success) {
        printf("match\n");
    }
    else {
        printf("not match\n");
    }
    // BUG FIX: the reference array was leaked on every call in the original.
    delete[] temp;
}
// Driver: builds a random input, computes the CPU reference histogram, then
// times the global-atomic and shared-memory GPU histogram kernels, checking
// each result against the CPU reference.
// NOTE(review): CUDA API return codes are left unchecked, as in the original.
int main() {
    DS_timer timer(8);
    timer.initTimers();

    float *a, *d_a;
    int *b, *d_b;

    int A_SIZE = NUM_DATA;
    int B_SIZE = NUM_BIN;
    int A_MEM_SIZE = A_SIZE * sizeof(float);
    int B_MEM_SIZE = B_SIZE * sizeof(int);

    a = new float[A_SIZE]; memset(a, 0, A_MEM_SIZE);
    b = new int[B_SIZE]; memset(b, 0, B_MEM_SIZE);  // b already zeroed here

    // Samples uniformly distributed in [0, 256); bin index = truncated value.
    for (int i = 0; i < A_SIZE; i++) {
        a[i] = rand() / (float)RAND_MAX * 256.0f;
    }

    // serial (CPU reference)
    timer.onTimer(0);
    serial(a, b, A_SIZE);
    timer.offTimer(0);
    // serial result validation check
    check(a, b, A_SIZE, B_SIZE);

    // reset histogram before the GPU runs
    memset(b, 0, B_MEM_SIZE);

    // device global memory allocation
    cudaMalloc(&d_a, A_MEM_SIZE);
    cudaMalloc(&d_b, B_MEM_SIZE);
    // memory cpy Host to Device
    cudaMemcpy(d_a, a, A_MEM_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, B_MEM_SIZE, cudaMemcpyHostToDevice);

    timer.onTimer(1);
    globalSync <<< NUM_DATA / NUM_THREADS_IN_BLOCK, NUM_THREADS_IN_BLOCK >>> (d_a, d_b, A_SIZE);
    // BUG FIX: cudaThreadSynchronize() is deprecated (and removed in recent
    // CUDA toolkits); cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    timer.offTimer(1);
    cudaMemcpy(b, d_b, B_MEM_SIZE, cudaMemcpyDeviceToHost);
    check(a, b, A_SIZE, B_SIZE);

    // optimized shared memory version start
    memset(b, 0, B_MEM_SIZE);
    cudaMemcpy(d_b, b, B_MEM_SIZE, cudaMemcpyHostToDevice);
    timer.onTimer(2);
    optimizedSharedMemory <<< NUM_DATA / NUM_THREADS_IN_BLOCK, NUM_THREADS_IN_BLOCK >>> (d_a, d_b, A_SIZE);
    // BUG FIX: kernel launches are asynchronous; without this sync the timer
    // stopped immediately after the launch and measured launch overhead only
    // (the global-atomic path above already synchronized before stopping).
    cudaDeviceSynchronize();
    timer.offTimer(2);
    cudaMemcpy(b, d_b, B_MEM_SIZE, cudaMemcpyDeviceToHost);
    check(a, b, A_SIZE, B_SIZE);

    // timer display
    timer.setTimerName(0, "serial");
    timer.setTimerName(1, "atomic");
    timer.setTimerName(2, "shared");
    timer.printTimer();

    cudaFree(d_a); cudaFree(d_b);
    delete[] a;
    delete[] b;
    return 0;
}
|
0a2cb0d478d9fa1e0a76d4a9e56e20f2bcb17253.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_common_init_kernel;
int xdim0_tea_leaf_common_init_kernel_h = -1;
int ydim0_tea_leaf_common_init_kernel_h = -1;
__constant__ int xdim1_tea_leaf_common_init_kernel;
int xdim1_tea_leaf_common_init_kernel_h = -1;
int ydim1_tea_leaf_common_init_kernel_h = -1;
__constant__ int xdim2_tea_leaf_common_init_kernel;
int xdim2_tea_leaf_common_init_kernel_h = -1;
int ydim2_tea_leaf_common_init_kernel_h = -1;
__constant__ int xdim3_tea_leaf_common_init_kernel;
int xdim3_tea_leaf_common_init_kernel_h = -1;
int ydim3_tea_leaf_common_init_kernel_h = -1;
__constant__ int xdim4_tea_leaf_common_init_kernel;
int xdim4_tea_leaf_common_init_kernel_h = -1;
int ydim4_tea_leaf_common_init_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_common_init_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_tea_leaf_common_init_kernel*(y))
#define OPS_ACC2(x,y) (x+xdim2_tea_leaf_common_init_kernel*(y))
#define OPS_ACC3(x,y) (x+xdim3_tea_leaf_common_init_kernel*(y))
#define OPS_ACC4(x,y) (x+xdim4_tea_leaf_common_init_kernel*(y))
//user function
// Computes one grid point of w = A u for the tea_leaf solver setup, where A
// is the 5-point operator built from diffusion coefficients Kx, Ky scaled by
// the scalars rx, ry, and the initial residual r = u - w.  All array accesses
// go through the OPS_ACCn(x,y) macros, which map 2-D offsets onto flat
// row-major storage using the per-argument pitches
// xdimN_tea_leaf_common_init_kernel held in __constant__ memory.
__device__
void tea_leaf_common_init_kernel_gpu(double *w, double *r, const double *Kx, const double *Ky,
  const double *u,const double *rx,const double *ry) {
  w[OPS_ACC0(0,0)] = (1.0
    + (*ry)*(Ky[OPS_ACC3(0, 1)] + Ky[OPS_ACC3(0,0)])
    + (*rx)*(Kx[OPS_ACC2(1, 0)] + Kx[OPS_ACC2(0,0)]))*u[OPS_ACC4(0,0)]
    - (*ry)*(Ky[OPS_ACC3(0, 1)] *u[OPS_ACC4(0, 1)] + Ky[OPS_ACC3(0,0)]*u[OPS_ACC4(0, -1)])
    - (*rx)*(Kx[OPS_ACC2(1, 0)] *u[OPS_ACC4(1, 0)] + Kx[OPS_ACC2(0,0)]*u[OPS_ACC4(-1, 0)]);
  r[OPS_ACC1(0,0)] = u[OPS_ACC4(0,0)] - w[OPS_ACC0(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
// GPU wrapper: one thread per (idx_x, idx_y) grid point.  Each dat pointer
// is advanced to this thread's element using the cached __constant__ pitch
// for that dat, then the user function runs for in-range threads only.
// arg5/arg6 are the scalar rx/ry coefficients passed by value.
__global__ void ops_tea_leaf_common_init_kernel(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
const double arg5,
const double arg6,
int size0,
int size1 ){
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // Advance each base pointer to this thread's element (row-major layout).
  arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_common_init_kernel;
  arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_common_init_kernel;
  arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_tea_leaf_common_init_kernel;
  arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_tea_leaf_common_init_kernel;
  arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_tea_leaf_common_init_kernel;
  // Bounds guard: the grid may overhang the iteration range.
  if (idx_x < size0 && idx_y < size1) {
    tea_leaf_common_init_kernel_gpu(arg0, arg1, arg2, arg3,
      arg4, &arg5, &arg6);
  }
}
// host stub function
// Host stub (auto-generated by ops.py) for the tea_leaf_common_init_kernel
// parallel loop: computes the locally owned iteration range, refreshes the
// cached __constant__ pitches when dataset sizes change, offsets the device
// base pointer of each dat argument to the start of the range, performs the
// halo exchanges, launches the GPU wrapper over a 2-D grid, and updates the
// OPS timing/transfer diagnostics for kernel index 36.
void ops_par_loop_tea_leaf_common_init_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6) {
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
  #ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args,7,range,36)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(36,"tea_leaf_common_init_kernel");
    OPS_kernels[36].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<2; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  // Refresh the cached __constant__ pitches only when a dat's row pitch
  // changed since the last launch (symbol copies are comparatively slow).
  if (xdim0 != xdim0_tea_leaf_common_init_kernel_h || xdim1 != xdim1_tea_leaf_common_init_kernel_h || xdim2 != xdim2_tea_leaf_common_init_kernel_h || xdim3 != xdim3_tea_leaf_common_init_kernel_h || xdim4 != xdim4_tea_leaf_common_init_kernel_h) {
    hipMemcpyToSymbol( xdim0_tea_leaf_common_init_kernel, &xdim0, sizeof(int) );
    xdim0_tea_leaf_common_init_kernel_h = xdim0;
    hipMemcpyToSymbol( xdim1_tea_leaf_common_init_kernel, &xdim1, sizeof(int) );
    xdim1_tea_leaf_common_init_kernel_h = xdim1;
    hipMemcpyToSymbol( xdim2_tea_leaf_common_init_kernel, &xdim2, sizeof(int) );
    xdim2_tea_leaf_common_init_kernel_h = xdim2;
    hipMemcpyToSymbol( xdim3_tea_leaf_common_init_kernel, &xdim3, sizeof(int) );
    xdim3_tea_leaf_common_init_kernel_h = xdim3;
    hipMemcpyToSymbol( xdim4_tea_leaf_common_init_kernel, &xdim4, sizeof(int) );
    xdim4_tea_leaf_common_init_kernel_h = xdim4;
  }
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  int dat2 = args[2].dat->elem_size;
  int dat3 = args[3].dat->elem_size;
  int dat4 = args[4].dat->elem_size;
  char *p_a[7];
  //set up initial pointers
  // Offset each dat's device base pointer (byte arithmetic) to the first
  // element of the local iteration range in the 2-D row-major layout.
  int d_m[OPS_MAX_DIM];
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
  #endif
  int base0 = dat0 * 1 *
  (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
  base0 = base0+ dat0 *
  args[0].dat->size[0] *
  (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
  p_a[0] = (char *)args[0].data_d + base0;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
  #endif
  int base1 = dat1 * 1 *
  (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
  base1 = base1+ dat1 *
  args[1].dat->size[0] *
  (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
  p_a[1] = (char *)args[1].data_d + base1;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
  #endif
  int base2 = dat2 * 1 *
  (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
  base2 = base2+ dat2 *
  args[2].dat->size[0] *
  (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
  p_a[2] = (char *)args[2].data_d + base2;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
  #endif
  int base3 = dat3 * 1 *
  (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
  base3 = base3+ dat3 *
  args[3].dat->size[0] *
  (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
  p_a[3] = (char *)args[3].data_d + base3;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
  #endif
  int base4 = dat4 * 1 *
  (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
  base4 = base4+ dat4 *
  args[4].dat->size[0] *
  (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
  p_a[4] = (char *)args[4].data_d + base4;
  ops_H_D_exchanges_device(args, 7);
  ops_halo_exchanges(args,7,range);
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[36].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL(( ops_tea_leaf_common_init_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
  (double *)p_a[2], (double *)p_a[3],
  (double *)p_a[4], *(double *)arg5.data,
  *(double *)arg6.data,x_size, y_size);
  if (OPS_diags>1) {
    // Synchronize only in diagnostic mode so the kernel time is accurate.
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[36].time += t1-t2;
  }
  ops_set_dirtybit_device(args, 7);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[36].mpi_time += t2-t1;
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg4);
  }
}
| 0a2cb0d478d9fa1e0a76d4a9e56e20f2bcb17253.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_common_init_kernel;
int xdim0_tea_leaf_common_init_kernel_h = -1;
int ydim0_tea_leaf_common_init_kernel_h = -1;
__constant__ int xdim1_tea_leaf_common_init_kernel;
int xdim1_tea_leaf_common_init_kernel_h = -1;
int ydim1_tea_leaf_common_init_kernel_h = -1;
__constant__ int xdim2_tea_leaf_common_init_kernel;
int xdim2_tea_leaf_common_init_kernel_h = -1;
int ydim2_tea_leaf_common_init_kernel_h = -1;
__constant__ int xdim3_tea_leaf_common_init_kernel;
int xdim3_tea_leaf_common_init_kernel_h = -1;
int ydim3_tea_leaf_common_init_kernel_h = -1;
__constant__ int xdim4_tea_leaf_common_init_kernel;
int xdim4_tea_leaf_common_init_kernel_h = -1;
int ydim4_tea_leaf_common_init_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_common_init_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_tea_leaf_common_init_kernel*(y))
#define OPS_ACC2(x,y) (x+xdim2_tea_leaf_common_init_kernel*(y))
#define OPS_ACC3(x,y) (x+xdim3_tea_leaf_common_init_kernel*(y))
#define OPS_ACC4(x,y) (x+xdim4_tea_leaf_common_init_kernel*(y))
//user function
// Computes one grid point of w = A u for the tea_leaf solver setup, where A
// is the 5-point operator built from diffusion coefficients Kx, Ky scaled by
// the scalars rx, ry, and the initial residual r = u - w.  All array accesses
// go through the OPS_ACCn(x,y) macros, which map 2-D offsets onto flat
// row-major storage using the per-argument pitches
// xdimN_tea_leaf_common_init_kernel held in __constant__ memory.
__device__
void tea_leaf_common_init_kernel_gpu(double *w, double *r, const double *Kx, const double *Ky,
  const double *u,const double *rx,const double *ry) {
  w[OPS_ACC0(0,0)] = (1.0
    + (*ry)*(Ky[OPS_ACC3(0, 1)] + Ky[OPS_ACC3(0,0)])
    + (*rx)*(Kx[OPS_ACC2(1, 0)] + Kx[OPS_ACC2(0,0)]))*u[OPS_ACC4(0,0)]
    - (*ry)*(Ky[OPS_ACC3(0, 1)] *u[OPS_ACC4(0, 1)] + Ky[OPS_ACC3(0,0)]*u[OPS_ACC4(0, -1)])
    - (*rx)*(Kx[OPS_ACC2(1, 0)] *u[OPS_ACC4(1, 0)] + Kx[OPS_ACC2(0,0)]*u[OPS_ACC4(-1, 0)]);
  r[OPS_ACC1(0,0)] = u[OPS_ACC4(0,0)] - w[OPS_ACC0(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
// GPU wrapper: one thread per (idx_x, idx_y) grid point.  Each dat pointer
// is advanced to this thread's element using the cached __constant__ pitch
// for that dat, then the user function runs for in-range threads only.
// arg5/arg6 are the scalar rx/ry coefficients passed by value.
__global__ void ops_tea_leaf_common_init_kernel(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
const double arg5,
const double arg6,
int size0,
int size1 ){
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // Advance each base pointer to this thread's element (row-major layout).
  arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_common_init_kernel;
  arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_common_init_kernel;
  arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_tea_leaf_common_init_kernel;
  arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_tea_leaf_common_init_kernel;
  arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_tea_leaf_common_init_kernel;
  // Bounds guard: the grid may overhang the iteration range.
  if (idx_x < size0 && idx_y < size1) {
    tea_leaf_common_init_kernel_gpu(arg0, arg1, arg2, arg3,
      arg4, &arg5, &arg6);
  }
}
// host stub function
// Host stub (auto-generated by ops.py) for the tea_leaf_common_init_kernel
// parallel loop: computes the locally owned iteration range, refreshes the
// cached __constant__ pitches when dataset sizes change, offsets the device
// base pointer of each dat argument to the start of the range, performs the
// halo exchanges, launches the CUDA wrapper over a 2-D grid, and updates the
// OPS timing/transfer diagnostics for kernel index 36.
void ops_par_loop_tea_leaf_common_init_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6) {
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
  #ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args,7,range,36)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(36,"tea_leaf_common_init_kernel");
    OPS_kernels[36].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<2; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  // Refresh the cached __constant__ pitches only when a dat's row pitch
  // changed since the last launch (symbol copies are comparatively slow).
  if (xdim0 != xdim0_tea_leaf_common_init_kernel_h || xdim1 != xdim1_tea_leaf_common_init_kernel_h || xdim2 != xdim2_tea_leaf_common_init_kernel_h || xdim3 != xdim3_tea_leaf_common_init_kernel_h || xdim4 != xdim4_tea_leaf_common_init_kernel_h) {
    cudaMemcpyToSymbol( xdim0_tea_leaf_common_init_kernel, &xdim0, sizeof(int) );
    xdim0_tea_leaf_common_init_kernel_h = xdim0;
    cudaMemcpyToSymbol( xdim1_tea_leaf_common_init_kernel, &xdim1, sizeof(int) );
    xdim1_tea_leaf_common_init_kernel_h = xdim1;
    cudaMemcpyToSymbol( xdim2_tea_leaf_common_init_kernel, &xdim2, sizeof(int) );
    xdim2_tea_leaf_common_init_kernel_h = xdim2;
    cudaMemcpyToSymbol( xdim3_tea_leaf_common_init_kernel, &xdim3, sizeof(int) );
    xdim3_tea_leaf_common_init_kernel_h = xdim3;
    cudaMemcpyToSymbol( xdim4_tea_leaf_common_init_kernel, &xdim4, sizeof(int) );
    xdim4_tea_leaf_common_init_kernel_h = xdim4;
  }
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  int dat2 = args[2].dat->elem_size;
  int dat3 = args[3].dat->elem_size;
  int dat4 = args[4].dat->elem_size;
  char *p_a[7];
  //set up initial pointers
  // Offset each dat's device base pointer (byte arithmetic) to the first
  // element of the local iteration range in the 2-D row-major layout.
  int d_m[OPS_MAX_DIM];
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
  #endif
  int base0 = dat0 * 1 *
  (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
  base0 = base0+ dat0 *
  args[0].dat->size[0] *
  (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
  p_a[0] = (char *)args[0].data_d + base0;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
  #endif
  int base1 = dat1 * 1 *
  (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
  base1 = base1+ dat1 *
  args[1].dat->size[0] *
  (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
  p_a[1] = (char *)args[1].data_d + base1;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
  #endif
  int base2 = dat2 * 1 *
  (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
  base2 = base2+ dat2 *
  args[2].dat->size[0] *
  (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
  p_a[2] = (char *)args[2].data_d + base2;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
  #endif
  int base3 = dat3 * 1 *
  (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
  base3 = base3+ dat3 *
  args[3].dat->size[0] *
  (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
  p_a[3] = (char *)args[3].data_d + base3;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
  #else
  for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
  #endif
  int base4 = dat4 * 1 *
  (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
  base4 = base4+ dat4 *
  args[4].dat->size[0] *
  (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
  p_a[4] = (char *)args[4].data_d + base4;
  ops_H_D_exchanges_device(args, 7);
  ops_halo_exchanges(args,7,range);
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[36].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  ops_tea_leaf_common_init_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
  (double *)p_a[2], (double *)p_a[3],
  (double *)p_a[4], *(double *)arg5.data,
  *(double *)arg6.data,x_size, y_size);
  if (OPS_diags>1) {
    // Synchronize only in diagnostic mode so the kernel time is accurate.
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[36].time += t1-t2;
  }
  ops_set_dirtybit_device(args, 7);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[36].mpi_time += t2-t1;
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg4);
  }
}
|
2338a0d4a3fca4b4901bed75c6a50c9c3a4b6e80.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2017-2018 Roman Klassen
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*/
#include "gpu_utils.cuh"
#include "hip/hip_runtime.h"
// Allocates bufsize bytes of device memory and returns the device pointer.
// CUDA_SAFE_CALL handles allocation failure.
void* gc_malloc(size_t bufsize)
{
    void* gpu_buf = NULL;
    CUDA_SAFE_CALL(hipMalloc((void**)&gpu_buf, bufsize));
    return gpu_buf;
}
// Releases device memory previously obtained from gc_malloc/gc_host2device.
void gc_free(void* gpu_buf)
{
    CUDA_SAFE_CALL(hipFree(gpu_buf));
}
// Allocates a device buffer and copies bufsize bytes from cpu_buf into it
// with a synchronous hipMemcpy (the Stream parameter is not used here).
// The allocation is rounded up to a multiple of 16 bytes plus 4 extra
// bytes — presumably padding for vectorized access; TODO confirm intent.
// Caller owns the returned device buffer (release via gc_free).
void* gc_host2device(gcStream_t Stream, void* cpu_buf, size_t bufsize)
{
    void* gpu_buf = NULL;
    unsigned int round_bufsize = CEIL(bufsize, 16) * 16 + 4;
    CUDA_SAFE_CALL(hipMalloc((void**)&gpu_buf, round_bufsize));
    CUDA_SAFE_CALL(hipMemcpy(gpu_buf, cpu_buf, bufsize, hipMemcpyHostToDevice));
    return gpu_buf;
}
// Allocates a pinned host buffer and enqueues an asynchronous device-to-host
// copy of bufsize bytes on Stream.  The copy may still be in flight when
// this returns: the caller must synchronize the stream (e.g. gc_stream_wait)
// before reading the result.
// NOTE(review): the caller appears responsible for freeing the pinned buffer
// with hipHostFree — no release helper is visible here; confirm ownership.
void* gc_device2host(gcStream_t Stream, void* gpu_buf, size_t bufsize)
{
    void* pinned = NULL;
    CUDA_SAFE_CALL(hipHostMalloc((void**)&pinned, bufsize));
    CUDA_SAFE_CALL(hipMemcpyAsync(pinned, gpu_buf, bufsize, hipMemcpyDeviceToHost, (ihipStream_t*)Stream.stream));
    return pinned;
}
// Rounds size up to the next multiple of align_by; returns size unchanged
// when it is already aligned.
// Fix: the original returned size + (size % align_by), which is not a
// multiple of align_by (e.g. align_size(10, 8) gave 12).  Rounding up
// requires adding the complement of the remainder instead.
int align_size(int size, int align_by)
{
    int rest = size%align_by;
    if (rest == 0) return size;
    return size + (align_by - rest);
}
// Creates the stream plus its two timing events and records the 'start'
// event on the new stream, beginning the elapsed-time window that
// gc_stream_stop later closes and reports.
void gc_stream_start(gcStream_t* Stream)
{
    CUDA_SAFE_CALL(hipStreamCreate((hipStream_t*)&Stream->stream));
    CUDA_SAFE_CALL(hipEventCreate((hipEvent_t*)&Stream->event));
    CUDA_SAFE_CALL(hipEventCreate((hipEvent_t*)&Stream->start));
    CUDA_SAFE_CALL(hipEventRecord((hipEvent_t)Stream->start, (hipStream_t)Stream->stream));
}
// Records the end event on the stream, waits for it to complete, prints the
// elapsed milliseconds since gc_stream_start, then destroys both events and
// the stream.  The gcStream_t is unusable after this call.
void gc_stream_stop(gcStream_t* Stream)
{
    CUDA_SAFE_CALL(hipEventRecord((hipEvent_t)Stream->event, (hipStream_t)Stream->stream));
    CUDA_SAFE_CALL(hipEventSynchronize((hipEvent_t)Stream->event));
    float etime = 0.0f;
    // Return code deliberately unchecked here; etime stays 0 on failure.
    hipEventElapsedTime(&etime, (hipEvent_t)Stream->start, (hipEvent_t)Stream->event);
    printf("***%f ms\n", etime);
    CUDA_SAFE_CALL(hipEventDestroy((hipEvent_t)Stream->event));
    CUDA_SAFE_CALL(hipEventDestroy((hipEvent_t)Stream->start));
    CUDA_SAFE_CALL(hipStreamDestroy((hipStream_t)Stream->stream));
}
void gc_stream_wait(gcStream_t* Stream)
{
CUDA_SAFE_CALL(hipStreamSynchronize((hipStream_t)Stream->stream));
} | 2338a0d4a3fca4b4901bed75c6a50c9c3a4b6e80.cu | /*
* Copyright 2017-2018 Roman Klassen
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*/
#include "gpu_utils.cuh"
#include "cuda_runtime.h"
// Allocates bufsize bytes of device memory and returns the device pointer.
// CUDA_SAFE_CALL handles allocation failure.
void* gc_malloc(size_t bufsize)
{
    void* gpu_buf = NULL;
    CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_buf, bufsize));
    return gpu_buf;
}
// Releases device memory previously obtained from gc_malloc/gc_host2device.
void gc_free(void* gpu_buf)
{
    CUDA_SAFE_CALL(cudaFree(gpu_buf));
}
// Allocates a device buffer and copies bufsize bytes from cpu_buf into it
// with a synchronous cudaMemcpy (the Stream parameter is not used here).
// The allocation is rounded up to a multiple of 16 bytes plus 4 extra
// bytes — presumably padding for vectorized access; TODO confirm intent.
// Caller owns the returned device buffer (release via gc_free).
void* gc_host2device(gcStream_t Stream, void* cpu_buf, size_t bufsize)
{
    void* gpu_buf = NULL;
    unsigned int round_bufsize = CEIL(bufsize, 16) * 16 + 4;
    CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_buf, round_bufsize));
    CUDA_SAFE_CALL(cudaMemcpy(gpu_buf, cpu_buf, bufsize, cudaMemcpyHostToDevice));
    return gpu_buf;
}
// Allocates a pinned host buffer and enqueues an asynchronous device-to-host
// copy of bufsize bytes on Stream.  The copy may still be in flight when
// this returns: the caller must synchronize the stream (e.g. gc_stream_wait)
// before reading the result.
// NOTE(review): the caller appears responsible for freeing the pinned buffer
// with cudaFreeHost — no release helper is visible here; confirm ownership.
void* gc_device2host(gcStream_t Stream, void* gpu_buf, size_t bufsize)
{
    void* pinned = NULL;
    CUDA_SAFE_CALL(cudaMallocHost((void**)&pinned, bufsize));
    CUDA_SAFE_CALL(cudaMemcpyAsync(pinned, gpu_buf, bufsize, cudaMemcpyDeviceToHost, (CUstream_st*)Stream.stream));
    return pinned;
}
// Rounds size up to the next multiple of align_by; returns size unchanged
// when it is already aligned.
// Fix: the original returned size + (size % align_by), which is not a
// multiple of align_by (e.g. align_size(10, 8) gave 12).  Rounding up
// requires adding the complement of the remainder instead.
int align_size(int size, int align_by)
{
    int rest = size%align_by;
    if (rest == 0) return size;
    return size + (align_by - rest);
}
// Creates the stream plus its two timing events and records the 'start'
// event on the new stream, beginning the elapsed-time window that
// gc_stream_stop later closes and reports.
void gc_stream_start(gcStream_t* Stream)
{
    CUDA_SAFE_CALL(cudaStreamCreate((cudaStream_t*)&Stream->stream));
    CUDA_SAFE_CALL(cudaEventCreate((cudaEvent_t*)&Stream->event));
    CUDA_SAFE_CALL(cudaEventCreate((cudaEvent_t*)&Stream->start));
    CUDA_SAFE_CALL(cudaEventRecord((cudaEvent_t)Stream->start, (cudaStream_t)Stream->stream));
}
// Records the end event on the stream, waits for it to complete, prints the
// elapsed milliseconds since gc_stream_start, then destroys both events and
// the stream.  The gcStream_t is unusable after this call.
void gc_stream_stop(gcStream_t* Stream)
{
    CUDA_SAFE_CALL(cudaEventRecord((cudaEvent_t)Stream->event, (cudaStream_t)Stream->stream));
    CUDA_SAFE_CALL(cudaEventSynchronize((cudaEvent_t)Stream->event));
    float etime = 0.0f;
    // Return code deliberately unchecked here; etime stays 0 on failure.
    cudaEventElapsedTime(&etime, (cudaEvent_t)Stream->start, (cudaEvent_t)Stream->event);
    printf("***%f ms\n", etime);
    CUDA_SAFE_CALL(cudaEventDestroy((cudaEvent_t)Stream->event));
    CUDA_SAFE_CALL(cudaEventDestroy((cudaEvent_t)Stream->start));
    CUDA_SAFE_CALL(cudaStreamDestroy((cudaStream_t)Stream->stream));
}
void gc_stream_wait(gcStream_t* Stream)
{
CUDA_SAFE_CALL(cudaStreamSynchronize((cudaStream_t)Stream->stream));
} |
9e26f72a344d61f764d1617d2c50747f62781afb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
template <typename T, typename C>
__global__ void
awkward_BitMaskedArray_to_ByteMaskedArray(T* tobytemask,
                                          const C* frombitmask,
                                          int64_t bitmasklength,
                                          bool validwhen,
                                          bool lsb_order,
                                          uint64_t invocation_index,
                                          uint64_t* err_code) {
  // Expands each 8-bit mask word into 8 byte-mask entries.  One thread
  // handles one bitmask byte; lsb_order selects whether bit 0 or bit 7
  // maps to the first output byte.  Becomes a no-op once err_code reports
  // an earlier error.
  uint64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (err_code[0] == NO_ERROR && thread_id < bitmasklength) {
    uint8_t word = frombitmask[thread_id];
    if (lsb_order) {
      // Least-significant bit first: test bit 0, then shift right.
      for (int bit = 0; bit < 8; bit++) {
        tobytemask[thread_id * 8 + bit] = ((word & ((uint8_t)1)) != validwhen);
        word >>= 1;
      }
    } else {
      // Most-significant bit first: test bit 7, then shift left.
      for (int bit = 0; bit < 8; bit++) {
        tobytemask[thread_id * 8 + bit] =
            (((word & ((uint8_t)128)) != 0) != validwhen);
        word <<= 1;
      }
    }
  }
}
| 9e26f72a344d61f764d1617d2c50747f62781afb.cu | // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
template <typename T, typename C>
__global__ void
awkward_BitMaskedArray_to_ByteMaskedArray(T* tobytemask,
                                          const C* frombitmask,
                                          int64_t bitmasklength,
                                          bool validwhen,
                                          bool lsb_order,
                                          uint64_t invocation_index,
                                          uint64_t* err_code) {
  // Expands each 8-bit mask word into 8 byte-mask entries.  One thread
  // handles one bitmask byte; lsb_order selects whether bit 0 or bit 7
  // maps to the first output byte.  Becomes a no-op once err_code reports
  // an earlier error.
  uint64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (err_code[0] == NO_ERROR && thread_id < bitmasklength) {
    uint8_t word = frombitmask[thread_id];
    if (lsb_order) {
      // Least-significant bit first: test bit 0, then shift right.
      for (int bit = 0; bit < 8; bit++) {
        tobytemask[thread_id * 8 + bit] = ((word & ((uint8_t)1)) != validwhen);
        word >>= 1;
      }
    } else {
      // Most-significant bit first: test bit 7, then shift left.
      for (int bit = 0; bit < 8; bit++) {
        tobytemask[thread_id * 8 + bit] =
            (((word & ((uint8_t)128)) != 0) != validwhen);
        word <<= 1;
      }
    }
  }
}
|
3b1fd55c327eb6e422315217b9e9db29687866bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zgeadd_batched.cu, normal z -> d, Tue Aug 30 09:38:38 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
Batches dlacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
// One thread owns row i of matrix blockIdx.y in the batch and walks its
// n columns, computing dB = alpha*dA + dB element-wise.  Matrices are
// column-major with leading dimensions ldda/lddb; rows beyond m are idle.
__global__ void
dgeadd_batched_kernel(
    int m, int n,
    double alpha,
    const double * const *dAarray, int ldda,
    double **dBarray, int lddb )
{
    // blockIdx.y selects which matrix of the batch this block updates.
    // dA and dB iterate across row i
    const double *dA = dAarray[ blockIdx.y ];
    double *dB = dBarray[ blockIdx.y ];
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i < m ) {
        dA += i;
        dB += i;
        // Step one column at a time (stride = leading dimension).
        const double *dAend = dA + n*ldda;
        while( dA < dAend ) {
            *dB = alpha*(*dA) + (*dB);
            dA += ldda;
            dB += lddb;
        }
    }
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha DOUBLE PRECISION
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a DOUBLE PRECISION array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
// Host entry point: validates arguments (reporting MAGMA-style negative
// error codes via magma_xerbla), then launches dgeadd_batched_kernel on the
// queue's stream with grid.x covering the m rows in NB-sized blocks and
// grid.y indexing the batch.  See the comment block above for parameters.
extern "C" void
magmablas_dgeadd_batched(
    magma_int_t m, magma_int_t n,
    double alpha,
    magmaDouble_const_ptr const dAarray[], magma_int_t ldda,
    magmaDouble_ptr dBarray[], magma_int_t lddb,
    magma_int_t batchCount,
    magma_queue_t queue )
{
    // Argument checks: info encodes the 1-based index of the bad argument.
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    else if ( batchCount < 0 )
        info = -8;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Quick return for empty work.
    if ( m == 0 || n == 0 || batchCount == 0 )
        return;
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ), batchCount );
    hipLaunchKernelGGL(( dgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
        m, n, alpha, dAarray, ldda, dBarray, lddb );
}
| 3b1fd55c327eb6e422315217b9e9db29687866bd.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zgeadd_batched.cu, normal z -> d, Tue Aug 30 09:38:38 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
Batches dlacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
// One thread owns row i of matrix blockIdx.y in the batch and walks its
// n columns, computing dB = alpha*dA + dB element-wise.  Matrices are
// column-major with leading dimensions ldda/lddb; rows beyond m are idle.
__global__ void
dgeadd_batched_kernel(
    int m, int n,
    double alpha,
    const double * const *dAarray, int ldda,
    double **dBarray, int lddb )
{
    // blockIdx.y selects which matrix of the batch this block updates.
    // dA and dB iterate across row i
    const double *dA = dAarray[ blockIdx.y ];
    double *dB = dBarray[ blockIdx.y ];
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i < m ) {
        dA += i;
        dB += i;
        // Step one column at a time (stride = leading dimension).
        const double *dAend = dA + n*ldda;
        while( dA < dAend ) {
            *dB = alpha*(*dA) + (*dB);
            dA += ldda;
            dB += lddb;
        }
    }
}
/***************************************************************************//**
Purpose
-------
    DGEADD adds two sets of matrices, dBarray[i] = alpha*dAarray[i] + dBarray[i],
    for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha DOUBLE PRECISION
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a DOUBLE PRECISION array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
// Host-side wrapper: checks arguments LAPACK-style, then launches
// dgeadd_batched_kernel so that dBarray[i] = alpha*dAarray[i] + dBarray[i]
// for every matrix pair i in [0, batchCount).
extern "C" void
magmablas_dgeadd_batched(
    magma_int_t m, magma_int_t n,
    double alpha,
    magmaDouble_const_ptr const dAarray[], magma_int_t ldda,
    magmaDouble_ptr dBarray[], magma_int_t lddb,
    magma_int_t batchCount,
    magma_queue_t queue )
{
    // Screen arguments in positional order; the first offending one
    // determines the negative error code reported through xerbla.
    magma_int_t arg_err = 0;
    if      ( m < 0 )              arg_err = -1;
    else if ( n < 0 )              arg_err = -2;
    else if ( ldda < max(1,m) )    arg_err = -5;
    else if ( lddb < max(1,m) )    arg_err = -7;
    else if ( batchCount < 0 )     arg_err = -8;

    if ( arg_err != 0 ) {
        magma_xerbla( __func__, -(arg_err) );
        return;
    }

    // Degenerate sizes or an empty batch: nothing to do.
    if ( m == 0 || n == 0 || batchCount == 0 )
        return;

    // NB threads per block, one thread per row; the grid's x dimension
    // tiles the m rows and its y dimension selects the batch entry.
    const dim3 block_dim( NB );
    const dim3 grid_dim( magma_ceildiv( m, NB ), batchCount );
    dgeadd_batched_kernel<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>(
        m, n, alpha, dAarray, ldda, dBarray, lddb );
}
|
f5e337c26f11e0043904276b66803e4a99903e82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
// Use the torch for GPU memory management. Thrust resize gives segfulat during
// debugging -g #include <torch/extension.h>
#include "convolution_hip.cuh"
#include "../../utils.h"
#include <THH/THHAtomics.cuh>
namespace kaolin {
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
/**
 * Tiled gather-GEMM-scatter: C[out_map[y], :] += A[in_map[y], :] * B.
 * A (hA x wA) is the gathered input-feature matrix, B (hB x wB) the kernel
 * weights, both row-major. Each BLOCK_SIZE x BLOCK_SIZE thread block stages
 * tiles of A and B in shared memory; the scatter into C uses atomicAdd
 * because several mapped rows may target the same output row.
 * Launch: blockDim = (BLOCK_SIZE, BLOCK_SIZE); grid covers wB columns (x)
 * and hA mapped rows (y). in_map/out_map must hold at least hA entries.
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul(const Dtype *A, const int wA, const int hA,
                       const Dtype *B, const int wB, const int hB, Dtype *C,
                       const Itype *in_map, const Itype *out_map) {
  // Use in_feat as A and kernel as B

  // Block index
  const int bx = blockIdx.x;
  const int by = blockIdx.y;

  // Thread index
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;

  // Global coordinate: x indexes columns of B/C, y indexes (mapped) rows of A.
  const int x = BLOCK_SIZE * bx + tx;
  const int y = BLOCK_SIZE * by + ty;

  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  Dtype Csub = 0;

  // Out-of-range threads fall back to row 0 as a harmless placeholder;
  // their result is discarded by the bounds check before the final write.
  const Itype in_row = y < hA ? in_map[y] : 0;
  const Itype out_row = y < hA ? out_map[y] : 0;

  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int s = 0; s < wA; s += BLOCK_SIZE) {
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];

    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ Dtype Bs[BLOCK_SIZE][BLOCK_SIZE];

    // Each thread loads one element of each tile; positions past the matrix
    // edge are zero-filled so the inner product below needs no bounds checks.
    As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * in_row + s + tx] : 0;
    Bs[ty][tx] = ((s + ty) < hB && x < wB) ? B[wB * (s + ty) + x] : 0;

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * Bs[k][tx];
    }

    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();
  }

  // Scatter the result through out_map; atomicAdd because distinct y's
  // can share the same out_row.
  if (y < hA && x < wB)
    atomicAdd(&C[wB * out_row + x], Csub);
  // C[wB * out_row + x] += Csub;
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B^T, E = D^T * A
* wA is A's width and wB is B's width
*
* +---+
* |B^T|
* +-------+
* | | |
* | A | C |
* | | |
* | | |
* +------------------+
* | D^T | E |
* +----------+---+
*
*/
/**
 * Fused backward pass: computes C = A * B^T (input-feature gradient,
 * scattered through in_map) and E = D^T * A (kernel-weight gradient) in a
 * single kernel so the shared-memory tiles of A are reused for both
 * products. A is the gathered output gradient, B the kernel weights, D the
 * gathered input features; all matrices are row-major.
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul2(const Dtype *A, const int wA, const int hA,
                        const Dtype *B, const int wB, const int hB,
                        const Dtype *D, const int wD, const int hD, Dtype *C,
                        Dtype *E, const Itype *in_map, const Itype *out_map) {
  // Use grad_out_feat as A, transposed kernel weight as B, and in_feat as D

  // Block index
  const int bx = blockIdx.x;
  const int by = blockIdx.y;

  // Thread index
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;

  // Coordinate. y is for rows, x is for columns.
  const int x = BLOCK_SIZE * bx + tx;
  const int y = BLOCK_SIZE * by + ty;

  // Row gather/scatter indices; out-of-range threads use row 0 and are
  // masked out before any write to C.
  const Itype in_row = y < hA ? in_map[y] : 0;
  const Itype out_row = y < hA ? out_map[y] : 0;

  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  Dtype Csub = 0;
  Dtype Esub = 0;

  // Declaration of the shared memory array As used to
  // store the sub-matrix of A
  __shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];

  // Declaration of the shared memory array Bs used to
  // store the sub-matrix of B
  __shared__ Dtype BTs[BLOCK_SIZE][BLOCK_SIZE];

  // Declaration of the shared memory array Ds used to
  // store the sub-matrix of D
  __shared__ Dtype DTs[BLOCK_SIZE][BLOCK_SIZE];

  // For Ds = D^T[...:..., ...:...], use the transposed grid dimension for A
  DTs[ty][tx] = (x < wD && y < hD) ? D[wD * in_row + x] : 0;

  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int s = 0; s < wA; s += BLOCK_SIZE) {
    // Load the matrices from device memory
    // to shared memory; each thread loads
    // one element of each matrix
    As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * out_row + s + tx] : 0;

    // Transposed kernel
    BTs[ty][tx] = ((s + ty) < wB && x < hB) ? B[wB * x + s + ty] : 0;

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * BTs[k][tx];
    }

    // For Esub, reset to 0
    Esub = 0;
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Esub += DTs[k][ty] * As[k][tx];
    }

    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();

    // For the E matrix which requires accumulation of multiple blocks, use
    // atomic addition. This can be replaced with a more sophisticated
    // reduction algorithm.
    if ((bx * BLOCK_SIZE + ty) < wD && (s + tx) < wA)
      atomicAdd(&E[wA * (bx * BLOCK_SIZE + ty) + (s + tx)], Esub);
  }

  // Scatter the input-gradient tile through in_map; atomicAdd because
  // multiple output rows can map back to the same input row.
  if (y < hA && x < hB)
    atomicAdd(&C[hB * in_row + x], Csub);
}
namespace minkowski {
/**
 * Host driver (HIP) for the sparse-convolution forward pass.
 * For each spatial offset k of the kernel, gathers the active input rows
 * listed in in_maps[k], multiplies them by the k-th
 * (in_nchannel x out_nchannel) weight slice, and accumulates the products
 * into d_out_feat rows chosen by out_maps[k] via the tiled matmul kernel.
 * Launches go to `stream`; the whole device is synchronized on entry and
 * exit. `cuhandle` and `out_nrows` are kept for signature compatibility
 * but are not used in this implementation.
 */
template <typename Dtype, typename Itype>
void ConvolutionForwardKernelGPU(const Dtype *d_in_feat, int in_nchannel,
                                 Dtype *d_out_feat, int out_nchannel,
                                 const Dtype *d_kernel,
                                 const pInOutMaps<Itype> &in_maps,
                                 const pInOutMaps<Itype> &out_maps,
                                 int out_nrows, hipblasHandle_t cuhandle,
                                 hipStream_t stream) {

  CUDA_CHECK(hipDeviceSynchronize());

  int n_active_in_volume, shared_mem_size = -1;
  // Pick the tile width (= thread-block side) from the channel counts:
  // larger tiles amortize shared-memory traffic but only pay off when
  // both channel dimensions are large enough.
  if ((in_nchannel > 16 && out_nchannel > 16 &&
       in_nchannel * out_nchannel >= 512) ||
      (in_nchannel > 24 && out_nchannel > 24))
    shared_mem_size = 32;
  else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
    shared_mem_size = 24;
  else if ((in_nchannel > 8 && out_nchannel > 8) ||
           (in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
    shared_mem_size = 16;
  else
    shared_mem_size = 8;

  dim3 threads(shared_mem_size, shared_mem_size);

  // Iterate through each spatial kernel and get indices for in_map and out_map
  for (int k = 0; k < in_maps.size(); k++) {
    n_active_in_volume = in_maps[k].size();
    if (n_active_in_volume == 0)
      continue;

    // Chop the active rows into num_div pieces so the y dimension of each
    // launch stays within MAX_GRID blocks.
    int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
    int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
    int step = (n_active_in_volume + num_div - 1) / num_div;

    for (int s = 0; s < num_div; s++) {
      int offset = step * s;
      int remainder = n_active_in_volume - step * s;
      int curr_num_active = remainder < step ? remainder : step;
      dim3 grid((out_nchannel + threads.x - 1) / threads.x,
                (curr_num_active + threads.y - 1) / threads.y);
      // Dispatch the runtime tile size to the matching compile-time
      // instantiation of the gather-GEMM-scatter kernel.
      switch (shared_mem_size) {
      case 32:
        matmul<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 24:
        matmul<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 16:
        matmul<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 8:
        matmul<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      }
    }
    CUDA_CHECK(hipGetLastError());
  }

  CUDA_CHECK(hipDeviceSynchronize());
}
template void ConvolutionForwardKernelGPU<float, int32_t>(
const float *d_in_feat, int in_nchannel, float *d_out_feat,
int out_nchannel, const float *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream);
template void ConvolutionForwardKernelGPU<double, int32_t>(
const double *d_in_feat, int in_nchannel, double *d_out_feat,
int out_nchannel, const double *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream);
/**
 * Host driver (HIP) for the sparse-convolution backward pass.
 * For each spatial offset k, launches the fused matmul2 kernel which
 * computes both the input-feature gradient (scattered into d_grad_in_feat
 * through in_maps[k]) and the kernel-weight gradient slice
 * d_grad_kernel[k * in_nchannel * out_nchannel] from the gathered output
 * gradient and input features. `cuhandle` and `out_nrows` are kept for
 * signature compatibility but are not used here.
 */
template <typename Dtype, typename Itype>
void ConvolutionBackwardKernelGPU(const Dtype *d_in_feat, Dtype *d_grad_in_feat,
                                  int in_nchannel, const Dtype *d_grad_out_feat,
                                  int out_nchannel, const Dtype *d_kernel,
                                  Dtype *d_grad_kernel,
                                  const pInOutMaps<Itype> &in_maps,
                                  const pInOutMaps<Itype> &out_maps,
                                  int out_nrows, hipblasHandle_t cuhandle,
                                  hipStream_t stream) {

  CUDA_CHECK(hipDeviceSynchronize());

  int n_active_in_volume, shared_mem_size = -1;
  // Choose the tile width (= thread-block side) from the channel counts.
  // NOTE(review): the 32-wide condition differs from the forward pass
  // (%32 here vs >24 there); presumably tuned independently — confirm.
  if ((in_nchannel > 16 && out_nchannel > 16 &&
       in_nchannel * out_nchannel >= 512) ||
      (in_nchannel % 32 == 0 && out_nchannel % 32 == 0))
    shared_mem_size = 32;
  else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
    shared_mem_size = 24;
  else if ((in_nchannel > 8 && out_nchannel > 8) ||
           (in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
    shared_mem_size = 16;
  else
    shared_mem_size = 8;

  dim3 threads(shared_mem_size, shared_mem_size);

  // One pass per spatial kernel offset, chunked so each launch's y
  // dimension stays within MAX_GRID blocks.
  for (int k = 0; k < in_maps.size(); k++) {
    n_active_in_volume = in_maps[k].size();
    if (n_active_in_volume == 0)
      continue;

    int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
    int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
    int step = (n_active_in_volume + num_div - 1) / num_div;

    for (int s = 0; s < num_div; s++) {
      int offset = step * s;
      int remainder = n_active_in_volume - step * s;
      int curr_num_active = remainder < step ? remainder : step;
      dim3 grid((in_nchannel + threads.x - 1) / threads.x,
                (curr_num_active + threads.y - 1) / threads.y);
      // Dispatch the runtime tile size to the matching compile-time
      // instantiation of the fused backward kernel.
      switch (shared_mem_size) {
      case 32:
        matmul2<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                                    // B
            d_in_feat, in_nchannel, curr_num_active,        // D
            d_grad_in_feat,                                 // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 24:
        matmul2<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                                    // B
            d_in_feat, in_nchannel, curr_num_active,        // D
            d_grad_in_feat,                                 // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 16:
        matmul2<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                                    // B
            d_in_feat, in_nchannel, curr_num_active,        // D
            d_grad_in_feat,                                 // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 8:
        matmul2<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                                    // B
            d_in_feat, in_nchannel, curr_num_active,        // D
            d_grad_in_feat,                                 // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      }
    }
    CUDA_CHECK(hipGetLastError());
  }

  CUDA_CHECK(hipDeviceSynchronize());
}
template void ConvolutionBackwardKernelGPU<float, int32_t>(
const float *d_in_feat, float *d_grad_in_feat, int in_nchannel,
const float *d_grad_out_feat, int out_nchannel, const float *d_kernel,
float *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream);
template void ConvolutionBackwardKernelGPU<double, int32_t>(
const double *d_in_feat, double *d_grad_in_feat, int in_nchannel,
const double *d_grad_out_feat, int out_nchannel, const double *d_kernel,
double *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, hipblasHandle_t cuhandle,
hipStream_t stream);
} // end namespace minkowski
} // namespace kaolin
| f5e337c26f11e0043904276b66803e4a99903e82.cu | // Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
// Use the torch for GPU memory management. Thrust resize gives segfulat during
// debugging -g #include <torch/extension.h>
#include "convolution.cuh"
#include "../../utils.h"
#include <THC/THCAtomics.cuh>
namespace kaolin {
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
/**
 * Tiled gather-GEMM-scatter: C[out_map[y], :] += A[in_map[y], :] * B.
 * A (hA x wA) is the gathered input-feature matrix, B (hB x wB) the kernel
 * weights, both row-major. Each BLOCK_SIZE x BLOCK_SIZE thread block stages
 * tiles of A and B in shared memory; the scatter into C uses atomicAdd
 * because several mapped rows may target the same output row.
 * Launch: blockDim = (BLOCK_SIZE, BLOCK_SIZE); grid covers wB columns (x)
 * and hA mapped rows (y). in_map/out_map must hold at least hA entries.
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul(const Dtype *A, const int wA, const int hA,
                       const Dtype *B, const int wB, const int hB, Dtype *C,
                       const Itype *in_map, const Itype *out_map) {
  // Use in_feat as A and kernel as B

  // Block index
  const int bx = blockIdx.x;
  const int by = blockIdx.y;

  // Thread index
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;

  // Global coordinate: x indexes columns of B/C, y indexes (mapped) rows of A.
  const int x = BLOCK_SIZE * bx + tx;
  const int y = BLOCK_SIZE * by + ty;

  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  Dtype Csub = 0;

  // Out-of-range threads fall back to row 0 as a harmless placeholder;
  // their result is discarded by the bounds check before the final write.
  const Itype in_row = y < hA ? in_map[y] : 0;
  const Itype out_row = y < hA ? out_map[y] : 0;

  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int s = 0; s < wA; s += BLOCK_SIZE) {
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];

    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ Dtype Bs[BLOCK_SIZE][BLOCK_SIZE];

    // Each thread loads one element of each tile; positions past the matrix
    // edge are zero-filled so the inner product below needs no bounds checks.
    As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * in_row + s + tx] : 0;
    Bs[ty][tx] = ((s + ty) < hB && x < wB) ? B[wB * (s + ty) + x] : 0;

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * Bs[k][tx];
    }

    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();
  }

  // Scatter the result through out_map; atomicAdd because distinct y's
  // can share the same out_row.
  if (y < hA && x < wB)
    atomicAdd(&C[wB * out_row + x], Csub);
  // C[wB * out_row + x] += Csub;
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B^T, E = D^T * A
* wA is A's width and wB is B's width
*
* +---+
* |B^T|
* +-------+
* | | |
* | A | C |
* | | |
* | | |
* +------------------+
* | D^T | E |
* +----------+---+
*
*/
/**
 * Fused backward pass: computes C = A * B^T (input-feature gradient,
 * scattered through in_map) and E = D^T * A (kernel-weight gradient) in a
 * single kernel so the shared-memory tiles of A are reused for both
 * products. A is the gathered output gradient, B the kernel weights, D the
 * gathered input features; all matrices are row-major.
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul2(const Dtype *A, const int wA, const int hA,
                        const Dtype *B, const int wB, const int hB,
                        const Dtype *D, const int wD, const int hD, Dtype *C,
                        Dtype *E, const Itype *in_map, const Itype *out_map) {
  // Use grad_out_feat as A, transposed kernel weight as B, and in_feat as D

  // Block index
  const int bx = blockIdx.x;
  const int by = blockIdx.y;

  // Thread index
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;

  // Coordinate. y is for rows, x is for columns.
  const int x = BLOCK_SIZE * bx + tx;
  const int y = BLOCK_SIZE * by + ty;

  // Row gather/scatter indices; out-of-range threads use row 0 and are
  // masked out before any write to C.
  const Itype in_row = y < hA ? in_map[y] : 0;
  const Itype out_row = y < hA ? out_map[y] : 0;

  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  Dtype Csub = 0;
  Dtype Esub = 0;

  // Declaration of the shared memory array As used to
  // store the sub-matrix of A
  __shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];

  // Declaration of the shared memory array Bs used to
  // store the sub-matrix of B
  __shared__ Dtype BTs[BLOCK_SIZE][BLOCK_SIZE];

  // Declaration of the shared memory array Ds used to
  // store the sub-matrix of D
  __shared__ Dtype DTs[BLOCK_SIZE][BLOCK_SIZE];

  // For Ds = D^T[...:..., ...:...], use the transposed grid dimension for A
  DTs[ty][tx] = (x < wD && y < hD) ? D[wD * in_row + x] : 0;

  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int s = 0; s < wA; s += BLOCK_SIZE) {
    // Load the matrices from device memory
    // to shared memory; each thread loads
    // one element of each matrix
    As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * out_row + s + tx] : 0;

    // Transposed kernel
    BTs[ty][tx] = ((s + ty) < wB && x < hB) ? B[wB * x + s + ty] : 0;

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * BTs[k][tx];
    }

    // For Esub, reset to 0
    Esub = 0;
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Esub += DTs[k][ty] * As[k][tx];
    }

    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();

    // For the E matrix which requires accumulation of multiple blocks, use
    // atomic addition. This can be replaced with a more sophisticated
    // reduction algorithm.
    if ((bx * BLOCK_SIZE + ty) < wD && (s + tx) < wA)
      atomicAdd(&E[wA * (bx * BLOCK_SIZE + ty) + (s + tx)], Esub);
  }

  // Scatter the input-gradient tile through in_map; atomicAdd because
  // multiple output rows can map back to the same input row.
  if (y < hA && x < hB)
    atomicAdd(&C[hB * in_row + x], Csub);
}
namespace minkowski {
/**
 * Host driver for the sparse-convolution forward pass.
 * For each spatial offset k of the kernel, gathers the active input rows
 * listed in in_maps[k], multiplies them by the k-th
 * (in_nchannel x out_nchannel) weight slice, and accumulates the products
 * into d_out_feat rows chosen by out_maps[k] via the tiled matmul kernel.
 * Launches go to `stream`; the whole device is synchronized on entry and
 * exit. `cuhandle` and `out_nrows` are kept for signature compatibility
 * but are not used in this implementation.
 */
template <typename Dtype, typename Itype>
void ConvolutionForwardKernelGPU(const Dtype *d_in_feat, int in_nchannel,
                                 Dtype *d_out_feat, int out_nchannel,
                                 const Dtype *d_kernel,
                                 const pInOutMaps<Itype> &in_maps,
                                 const pInOutMaps<Itype> &out_maps,
                                 int out_nrows, cublasHandle_t cuhandle,
                                 cudaStream_t stream) {

  CUDA_CHECK(cudaDeviceSynchronize());

  int n_active_in_volume, shared_mem_size = -1;
  // Pick the tile width (= thread-block side) from the channel counts:
  // larger tiles amortize shared-memory traffic but only pay off when
  // both channel dimensions are large enough.
  if ((in_nchannel > 16 && out_nchannel > 16 &&
       in_nchannel * out_nchannel >= 512) ||
      (in_nchannel > 24 && out_nchannel > 24))
    shared_mem_size = 32;
  else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
    shared_mem_size = 24;
  else if ((in_nchannel > 8 && out_nchannel > 8) ||
           (in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
    shared_mem_size = 16;
  else
    shared_mem_size = 8;

  dim3 threads(shared_mem_size, shared_mem_size);

  // Iterate through each spatial kernel and get indices for in_map and out_map
  for (int k = 0; k < in_maps.size(); k++) {
    n_active_in_volume = in_maps[k].size();
    if (n_active_in_volume == 0)
      continue;

    // Chop the active rows into num_div pieces so the y dimension of each
    // launch stays within MAX_GRID blocks.
    int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
    int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
    int step = (n_active_in_volume + num_div - 1) / num_div;

    for (int s = 0; s < num_div; s++) {
      int offset = step * s;
      int remainder = n_active_in_volume - step * s;
      int curr_num_active = remainder < step ? remainder : step;
      dim3 grid((out_nchannel + threads.x - 1) / threads.x,
                (curr_num_active + threads.y - 1) / threads.y);
      // Dispatch the runtime tile size to the matching compile-time
      // instantiation of the gather-GEMM-scatter kernel.
      switch (shared_mem_size) {
      case 32:
        matmul<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 24:
        matmul<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 16:
        matmul<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 8:
        matmul<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
            d_in_feat, in_nchannel, curr_num_active,
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      }
    }
    CUDA_CHECK(cudaGetLastError());
  }

  CUDA_CHECK(cudaDeviceSynchronize());
}
template void ConvolutionForwardKernelGPU<float, int32_t>(
const float *d_in_feat, int in_nchannel, float *d_out_feat,
int out_nchannel, const float *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
template void ConvolutionForwardKernelGPU<double, int32_t>(
const double *d_in_feat, int in_nchannel, double *d_out_feat,
int out_nchannel, const double *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
/**
 * Host driver for the sparse-convolution backward pass.
 * For each spatial offset k, launches the fused matmul2 kernel which
 * computes both the input-feature gradient (scattered into d_grad_in_feat
 * through in_maps[k]) and the kernel-weight gradient slice
 * d_grad_kernel[k * in_nchannel * out_nchannel] from the gathered output
 * gradient and input features. `cuhandle` and `out_nrows` are kept for
 * signature compatibility but are not used here.
 */
template <typename Dtype, typename Itype>
void ConvolutionBackwardKernelGPU(const Dtype *d_in_feat, Dtype *d_grad_in_feat,
                                  int in_nchannel, const Dtype *d_grad_out_feat,
                                  int out_nchannel, const Dtype *d_kernel,
                                  Dtype *d_grad_kernel,
                                  const pInOutMaps<Itype> &in_maps,
                                  const pInOutMaps<Itype> &out_maps,
                                  int out_nrows, cublasHandle_t cuhandle,
                                  cudaStream_t stream) {

  CUDA_CHECK(cudaDeviceSynchronize());

  int n_active_in_volume, shared_mem_size = -1;
  // Choose the tile width (= thread-block side) from the channel counts.
  // NOTE(review): the 32-wide condition differs from the forward pass
  // (%32 here vs >24 there); presumably tuned independently — confirm.
  if ((in_nchannel > 16 && out_nchannel > 16 &&
       in_nchannel * out_nchannel >= 512) ||
      (in_nchannel % 32 == 0 && out_nchannel % 32 == 0))
    shared_mem_size = 32;
  else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
    shared_mem_size = 24;
  else if ((in_nchannel > 8 && out_nchannel > 8) ||
           (in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
    shared_mem_size = 16;
  else
    shared_mem_size = 8;

  dim3 threads(shared_mem_size, shared_mem_size);

  // One pass per spatial kernel offset, chunked so each launch's y
  // dimension stays within MAX_GRID blocks.
  for (int k = 0; k < in_maps.size(); k++) {
    n_active_in_volume = in_maps[k].size();
    if (n_active_in_volume == 0)
      continue;

    int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
    int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
    int step = (n_active_in_volume + num_div - 1) / num_div;

    for (int s = 0; s < num_div; s++) {
      int offset = step * s;
      int remainder = n_active_in_volume - step * s;
      int curr_num_active = remainder < step ? remainder : step;
      dim3 grid((in_nchannel + threads.x - 1) / threads.x,
                (curr_num_active + threads.y - 1) / threads.y);
      // Dispatch the runtime tile size to the matching compile-time
      // instantiation of the fused backward kernel.
      switch (shared_mem_size) {
      case 32:
        matmul2<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                                    // B
            d_in_feat, in_nchannel, curr_num_active,        // D
            d_grad_in_feat,                                 // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 24:
        matmul2<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                                    // B
            d_in_feat, in_nchannel, curr_num_active,        // D
            d_grad_in_feat,                                 // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 16:
        matmul2<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                                    // B
            d_in_feat, in_nchannel, curr_num_active,        // D
            d_grad_in_feat,                                 // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      case 8:
        matmul2<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
            d_grad_out_feat, out_nchannel, curr_num_active, // A
            &d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
            in_nchannel,                                    // B
            d_in_feat, in_nchannel, curr_num_active,        // D
            d_grad_in_feat,                                 // C
            &d_grad_kernel[k * in_nchannel * out_nchannel], // E
            in_maps[k].data() + offset, out_maps[k].data() + offset);
        break;
      }
    }
    CUDA_CHECK(cudaGetLastError());
  }

  CUDA_CHECK(cudaDeviceSynchronize());
}
template void ConvolutionBackwardKernelGPU<float, int32_t>(
const float *d_in_feat, float *d_grad_in_feat, int in_nchannel,
const float *d_grad_out_feat, int out_nchannel, const float *d_kernel,
float *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
template void ConvolutionBackwardKernelGPU<double, int32_t>(
const double *d_in_feat, double *d_grad_in_feat, int in_nchannel,
const double *d_grad_out_feat, int out_nchannel, const double *d_kernel,
double *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
} // end namespace minkowski
} // namespace kaolin
|
027836ae2c21f94db1e3f3b5ebff33cc38cd9387.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlascl2.cu, normal z -> c, Sun Nov 20 20:20:29 2016
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
clascl2_full(int m, int n, const float* D, magmaFloatComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;

    A += ind;

    // Guard BEFORE touching D: the grid has ceil(m/NB) blocks, so threads
    // in the last block can have ind >= m, and D has only m entries —
    // loading D[ind] unconditionally was an out-of-bounds read.
    if (ind < m) {
        float mul = D[ind];    // per-row scale factor
        for (int j=0; j < n; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
clascl2_lower(int m, int n, const float* D, magmaFloatComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;

    // Last column to scale in this row: the diagonal, clamped to n-1.
    int break_d = (ind < n) ? ind : n-1;

    A += ind;

    // Guard BEFORE touching D: threads in the last block can have
    // ind >= m while D has only m entries — loading D[ind]
    // unconditionally was an out-of-bounds read.
    if (ind < m) {
        float mul = D[ind];    // per-row scale factor
        for (int j=0; j <= break_d; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
// Scales the upper-triangular part of A: row `ind` is scaled from the right
// edge down to the diagonal element.
__global__ void
clascl2_upper(int m, int n, const float *D, magmaFloatComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    // Guard first: D[ind] must not be read for rows past m (last partial block
    // read out of bounds in the original code).
    if (ind < m) {
        float mul = D[ind];
        A += ind;
        for (int j=n-1; j >= ind; j--)
            A[j*lda] *= mul;
    }
}
/***************************************************************************//**
Purpose
-------
CLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_clascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
// Host wrapper: validates arguments, then dispatches the kernel matching the
// requested storage type on the queue's stream. See the doc comment above for
// the full argument contract.
extern "C" void
magmablas_clascl2(
    magma_type_t type, magma_int_t m, magma_int_t n,
    magmaFloat_const_ptr dD,
    magmaFloatComplex_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
    *info = 0;
    if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
        *info = -1;
    else if ( m < 0 )
        *info = -2;
    else if ( n < 0 )
        *info = -3;
    else if ( ldda < max(1,m) )
        *info = -5;

    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return;  //info;
    }

    // Quick return (as in LAPACK lascl): a zero-sized matrix would otherwise
    // produce gridDim.x == 0, which is an invalid launch configuration.
    if ( m == 0 || n == 0 )
        return;

    dim3 grid( magma_ceildiv( m, NB ) );
    dim3 threads( NB );

    if (type == MagmaLower) {
        hipLaunchKernelGGL(( clascl2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
    }
    else if (type == MagmaUpper) {
        hipLaunchKernelGGL(( clascl2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
    }
    else if (type == MagmaFull) {
        hipLaunchKernelGGL(( clascl2_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
    }
}
| 027836ae2c21f94db1e3f3b5ebff33cc38cd9387.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlascl2.cu, normal z -> c, Sun Nov 20 20:20:29 2016
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
// Scales every row of the m x n matrix A by the per-row factor D[row].
// Launch: grid of ceildiv(m, NB) blocks, NB threads; one thread per row.
__global__ void
clascl2_full(int m, int n, const float* D, magmaFloatComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    // Guard before touching D: the last block is partial when m is not a
    // multiple of NB, and the original code read D[ind] out of bounds there.
    if (ind < m) {
        float mul = D[ind];   // per-row scale factor
        A += ind;
        for (int j=0; j < n; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
// Scales the lower-triangular part of A: row `ind` is scaled from the left
// edge up to (and including) the diagonal element.
__global__ void
clascl2_lower(int m, int n, const float* D, magmaFloatComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    // Guard first: reading D[ind] for out-of-range rows (last partial block)
    // was an out-of-bounds access in the original code.
    if (ind < m) {
        int break_d = (ind < n) ? ind : n-1;  // last column at/left of diagonal
        float mul = D[ind];
        A += ind;
        for (int j=0; j <= break_d; j++ )
            A[j*lda] *= mul;
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
// Scales the upper-triangular part of A: row `ind` is scaled from the right
// edge down to the diagonal element.
__global__ void
clascl2_upper(int m, int n, const float *D, magmaFloatComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    // Guard first: D[ind] must not be read for rows past m (last partial block
    // read out of bounds in the original code).
    if (ind < m) {
        float mul = D[ind];
        A += ind;
        for (int j=n-1; j >= ind; j--)
            A[j*lda] *= mul;
    }
}
/***************************************************************************//**
Purpose
-------
CLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_clascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
// Host wrapper: validates arguments, then dispatches the kernel matching the
// requested storage type on the queue's stream. See the doc comment above for
// the full argument contract.
extern "C" void
magmablas_clascl2(
    magma_type_t type, magma_int_t m, magma_int_t n,
    magmaFloat_const_ptr dD,
    magmaFloatComplex_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
    *info = 0;
    if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
        *info = -1;
    else if ( m < 0 )
        *info = -2;
    else if ( n < 0 )
        *info = -3;
    else if ( ldda < max(1,m) )
        *info = -5;

    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return;  //info;
    }

    // Quick return (as in LAPACK lascl): a zero-sized matrix would otherwise
    // produce gridDim.x == 0, which is an invalid launch configuration.
    if ( m == 0 || n == 0 )
        return;

    dim3 grid( magma_ceildiv( m, NB ) );
    dim3 threads( NB );

    if (type == MagmaLower) {
        clascl2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
    }
    else if (type == MagmaUpper) {
        clascl2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
    }
    else if (type == MagmaFull) {
        clascl2_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
    }
}
|
847ba91eacf35b423e9c4cf27ac7ccfd9cf2e747.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Accumulates spike feature projections into the running template matrix WU
// (exponential moving average with weight pm) and copies best-match scores for
// the Nnearest closest templates into cf.
// Launch layout: bid = blockIdx.x selects the template; tid = threadIdx.x is
// used directly as a feature index, so this presumably requires
// blockDim.x == Nfeatures -- TODO confirm against the host-side launch.
__global__ void average_snips(const double *Params, const int *ioff, const int *id, const float *uproj, const float *cmax, const int *iList, float *cf, float *WU){
    int tid, bid, ind, Nspikes, Nfeatures, NfeatW, Nnearest, t;
    float xsum = 0.0f, pm;
    // Unpack scalar parameters passed from the host as doubles.
    Nspikes = (int) Params[0];
    Nfeatures = (int) Params[1];
    pm = (float) Params[3];
    NfeatW = (int) Params[4];
    Nnearest = (int) Params[6];
    tid = threadIdx.x;
    bid = blockIdx.x;
    // Every thread scans all spikes and processes those assigned (id[]) to
    // this block's template.
    for(ind=0; ind<Nspikes;ind++)
        if (id[ind]==bid){
            xsum = uproj[tid + Nfeatures * ind];
            // Running average: WU = pm*WU + (1-pm)*projection, offset by ioff[ind].
            WU[tid + ioff[ind] + NfeatW * bid] = pm * WU[tid + ioff[ind] + NfeatW * bid]
                + (1-pm) * xsum;
            // go through the Nnearest nearest filters and copy their match scores
            for (t=0;t<Nnearest;t++)
                cf[ind + t*Nspikes] = cmax[ind + Nspikes * iList[t + Nnearest*bid]];
        }
} | 847ba91eacf35b423e9c4cf27ac7ccfd9cf2e747.cu | #include "includes.h"
// Accumulates spike feature projections into the running template matrix WU
// (exponential moving average with weight pm) and copies best-match scores for
// the Nnearest closest templates into cf.
// Launch layout: bid = blockIdx.x selects the template; tid = threadIdx.x is
// used directly as a feature index, so this presumably requires
// blockDim.x == Nfeatures -- TODO confirm against the host-side launch.
__global__ void average_snips(const double *Params, const int *ioff, const int *id, const float *uproj, const float *cmax, const int *iList, float *cf, float *WU){
    int tid, bid, ind, Nspikes, Nfeatures, NfeatW, Nnearest, t;
    float xsum = 0.0f, pm;
    // Unpack scalar parameters passed from the host as doubles.
    Nspikes = (int) Params[0];
    Nfeatures = (int) Params[1];
    pm = (float) Params[3];
    NfeatW = (int) Params[4];
    Nnearest = (int) Params[6];
    tid = threadIdx.x;
    bid = blockIdx.x;
    // Every thread scans all spikes and processes those assigned (id[]) to
    // this block's template.
    for(ind=0; ind<Nspikes;ind++)
        if (id[ind]==bid){
            xsum = uproj[tid + Nfeatures * ind];
            // Running average: WU = pm*WU + (1-pm)*projection, offset by ioff[ind].
            WU[tid + ioff[ind] + NfeatW * bid] = pm * WU[tid + ioff[ind] + NfeatW * bid]
                + (1-pm) * xsum;
            // go through the Nnearest nearest filters and copy their match scores
            for (t=0;t<Nnearest;t++)
                cf[ind + t*Nspikes] = cmax[ind + Nspikes * iList[t + Nnearest*bid]];
        }
} |
deb06c01b524845aa7ede9fc784a75bdaa30f776.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgesellcmv.cu normal z -> c, Wed Sep 17 15:08:43 2014
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_c
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// Computes y = alpha*A*x + beta*y with A in SELLC/SELLP layout: one block per
// slice (blockIdx.x), one thread per row inside the slice; values within a
// slice are stored column-major in chunks of `blocksize`.
// NOTE(review): expects gridDim.x == #slices and blockDim.x == blocksize
// (see the launch in magma_cgesellcmv below) -- confirm if reused elsewhere.
__global__ void
cgesellcmv_kernel( int num_rows,
                   int num_cols,
                   int blocksize,
                   magmaFloatComplex alpha,
                   magmaFloatComplex *d_val,
                   magma_index_t *d_colind,
                   magma_index_t *d_rowptr,
                   magmaFloatComplex *d_x,
                   magmaFloatComplex beta,
                   magmaFloatComplex *d_y)
{
    // threads assigned to rows
    int Idx = blockDim.x * blockIdx.x + threadIdx.x ;
    // start of this slice in the packed value/column-index arrays
    int offset = d_rowptr[ blockIdx.x ];
    // number of packed column-chunks in this slice
    int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize;
    if(Idx < num_rows ){
        magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
        for ( int n = 0; n < border; n++){
            int col = d_colind [offset+ blocksize * n + threadIdx.x ];
            magmaFloatComplex val = d_val[offset+ blocksize * n + threadIdx.x];
            // skip explicit zero entries used as slice padding
            if( val != 0){
                dot=dot+val*d_x[col];
            }
        }
        d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ];
    }
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
blocksize magma_int_t
number of rows in one ELL-slice
@param
slices magma_int_t
number of slices in matrix
@param
alignment magma_int_t
number of threads assigned to one row (=1)
@param
alpha magmaFloatComplex
scalar multiplier
@param
d_val magmaFloatComplex*
array containing values of A in SELLC/P
@param
d_colind magma_int_t*
columnindices of A in SELLC/P
@param
d_rowptr magma_int_t*
rowpointer of SELLP
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar multiplier
@param
d_y magmaFloatComplex*
input/output vector y
@ingroup magmasparse_cblas
********************************************************************/
// Host wrapper: launches cgesellcmv_kernel with one block per slice and
// `blocksize` threads per block on magma_stream.
// NOTE(review): transA and alignment are accepted but not used by this
// implementation; no argument validation or error checking is performed.
extern "C" magma_int_t
magma_cgesellcmv( magma_trans_t transA,
                  magma_int_t m, magma_int_t n,
                  magma_int_t blocksize,
                  magma_int_t slices,
                  magma_int_t alignment,
                  magmaFloatComplex alpha,
                  magmaFloatComplex *d_val,
                  magma_index_t *d_colind,
                  magma_index_t *d_rowptr,
                  magmaFloatComplex *d_x,
                  magmaFloatComplex beta,
                  magmaFloatComplex *d_y ){
    // the kernel can only handle up to 65535 slices
    // (~2M rows for blocksize 32)
    dim3 grid( slices, 1, 1);
    hipLaunchKernelGGL(( cgesellcmv_kernel), dim3(grid), dim3(blocksize), 0, magma_stream ,
        m, n, blocksize, alpha,
        d_val, d_colind, d_rowptr, d_x, beta, d_y );
    return MAGMA_SUCCESS;
}
| deb06c01b524845aa7ede9fc784a75bdaa30f776.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgesellcmv.cu normal z -> c, Wed Sep 17 15:08:43 2014
*/
#include "cuda_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_c
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// Computes y = alpha*A*x + beta*y with A in SELLC/SELLP layout: one block per
// slice (blockIdx.x), one thread per row inside the slice; values within a
// slice are stored column-major in chunks of `blocksize`.
// NOTE(review): expects gridDim.x == #slices and blockDim.x == blocksize
// (see the launch in magma_cgesellcmv below) -- confirm if reused elsewhere.
__global__ void
cgesellcmv_kernel( int num_rows,
                   int num_cols,
                   int blocksize,
                   magmaFloatComplex alpha,
                   magmaFloatComplex *d_val,
                   magma_index_t *d_colind,
                   magma_index_t *d_rowptr,
                   magmaFloatComplex *d_x,
                   magmaFloatComplex beta,
                   magmaFloatComplex *d_y)
{
    // threads assigned to rows
    int Idx = blockDim.x * blockIdx.x + threadIdx.x ;
    // start of this slice in the packed value/column-index arrays
    int offset = d_rowptr[ blockIdx.x ];
    // number of packed column-chunks in this slice
    int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize;
    if(Idx < num_rows ){
        magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
        for ( int n = 0; n < border; n++){
            int col = d_colind [offset+ blocksize * n + threadIdx.x ];
            magmaFloatComplex val = d_val[offset+ blocksize * n + threadIdx.x];
            // skip explicit zero entries used as slice padding
            if( val != 0){
                dot=dot+val*d_x[col];
            }
        }
        d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ];
    }
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
blocksize magma_int_t
number of rows in one ELL-slice
@param
slices magma_int_t
number of slices in matrix
@param
alignment magma_int_t
number of threads assigned to one row (=1)
@param
alpha magmaFloatComplex
scalar multiplier
@param
d_val magmaFloatComplex*
array containing values of A in SELLC/P
@param
d_colind magma_int_t*
columnindices of A in SELLC/P
@param
d_rowptr magma_int_t*
rowpointer of SELLP
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar multiplier
@param
d_y magmaFloatComplex*
input/output vector y
@ingroup magmasparse_cblas
********************************************************************/
// Host wrapper: launches cgesellcmv_kernel with one block per slice and
// `blocksize` threads per block on magma_stream.
// NOTE(review): transA and alignment are accepted but not used by this
// implementation; no argument validation or error checking is performed.
extern "C" magma_int_t
magma_cgesellcmv( magma_trans_t transA,
                  magma_int_t m, magma_int_t n,
                  magma_int_t blocksize,
                  magma_int_t slices,
                  magma_int_t alignment,
                  magmaFloatComplex alpha,
                  magmaFloatComplex *d_val,
                  magma_index_t *d_colind,
                  magma_index_t *d_rowptr,
                  magmaFloatComplex *d_x,
                  magmaFloatComplex beta,
                  magmaFloatComplex *d_y ){
    // the kernel can only handle up to 65535 slices
    // (~2M rows for blocksize 32)
    dim3 grid( slices, 1, 1);
    cgesellcmv_kernel<<< grid, blocksize, 0, magma_stream >>>
    ( m, n, blocksize, alpha,
      d_val, d_colind, d_rowptr, d_x, beta, d_y );
    return MAGMA_SUCCESS;
}
|
be9952c0ec2dc1d6ffdaf04475e34b2b60d0d356.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <iomanip>
#include <hip/hip_runtime.h>
using namespace std;
// Fills a rows x cols matrix with random +/-1 values, consuming exactly one
// rand() call per element in row-major order.
void MatrixRandBin(float *mat,int rows,int cols){
    int total = rows * cols;
    for (int idx = 0; idx < total; ++idx) {
        float r = (float)rand() / RAND_MAX;
        mat[idx] = (r > 0.5) ? 1.0f : -1.0f;
    }
}
// Prints a rows x cols float matrix to stdout, one row per line, followed by
// a blank line. Elements are width-2 padded and space separated.
void MatrixPrint(float *mat,int rows,int cols){
    for (int r = 0; r < rows; ++r) {
        const float *row = mat + r * cols;
        for (int c = 0; c < cols; ++c) {
            cout << setw(2) << row[c] << " ";
        }
        cout << endl;
    }
    cout << endl;
}
// Prints a rows x cols int matrix to stdout, one row per line, followed by a
// blank line. Elements are width-2 padded and space separated.
void MatrixPrintD(int *mat,int rows,int cols){
    for (int r = 0; r < rows; ++r) {
        const int *row = mat + r * cols;
        for (int c = 0; c < cols; ++c) {
            cout << setw(2) << row[c] << " ";
        }
        cout << endl;
    }
    cout << endl;
}
// Returns the sum of absolute element-wise differences (L1 distance) between
// two rows x cols matrices, accumulated in row-major order.
float MatrixCompare(float *a,float *b,int rows,int cols){
    float total = 0;
    int n = rows * cols;
    for (int idx = 0; idx < n; ++idx) {
        total += abs(a[idx] - b[idx]);
    }
    return total;
}
// CPU reference matrix multiply: c = a (a_rows x a_cols) * b (b_rows x b_cols),
// all matrices dense row-major. Assumes a_cols == b_rows; c is a_rows x b_cols.
void MatrixMul_host(float *a,int a_rows,int a_cols,float *b,int b_rows,int b_cols,float *c)
{
    for (int row = 0; row < a_rows; ++row) {
        for (int col = 0; col < b_cols; ++col) {
            float acc = 0;
            for (int k = 0; k < b_rows; ++k)
                acc += a[row * a_cols + k] * b[k * b_cols + col];
            c[row * b_cols + col] = acc;
        }
    }
}
// Horizontal packing: each run of BINSIZE consecutive row elements of `a`
// (values expected to be +1/-1, zero-padded past the real width) becomes one
// int in a_bin, with a bit set where the element equals +1.0f. The first
// element of a chunk ends up in the most significant of the BINSIZE bits.
// Grid-stride loop over all a_rows*MaxBS output words; pitches are in elements.
__global__ void AMatrix2Bin(float *a,int *a_bin,int a_rows,int pitch_a,int pitch_a_bin,int MaxBS,int BINSIZE){
    int tix=threadIdx.x;
    // int tiy=threadIdx.y;
    int bix=blockIdx.x;
    // int biy=blockIdx.y;
    int bdx=blockDim.x;
    // int bdy=blockDim.y;
    int gdx=gridDim.x;
    // int gdy=gridDim.y;
    int maxThreads=MaxBS*a_rows;
    for(int id = bix*bdx+tix; id < maxThreads; id+=gdx*bdx) {
        int rid=id/MaxBS;   // source row
        int cid=id%MaxBS;   // packed word index within the row
        int Integer=0;
        int base=1;
        // Walk the chunk backwards so the chunk's last element lands in bit 0.
        for (int i=0;i<BINSIZE;i++){
            if (a[rid*pitch_a+(cid+1)*BINSIZE-1-i]==1.f){
                Integer+=base;
            }
            base=base<<1;
        }
        a_bin[rid*pitch_a_bin+cid]=Integer;
    }
}
// Vertical packing: each run of BINSIZE consecutive COLUMN elements of `b`
// (values expected to be +1/-1, zero-padded past the real height) becomes one
// int in b_bin (MaxBS rows x b_cols), mirroring AMatrix2Bin's bit order.
// Grid-stride loop over all MaxBS*b_cols output words; pitches are in elements.
__global__ void BMatrix2Bin(float *b,int *b_bin,int b_cols,int pitch_b,int pitch_b_bin,int MaxBS,int BINSIZE){
    int tix=threadIdx.x;
    // int tiy=threadIdx.y;
    int bix=blockIdx.x;
    // int biy=blockIdx.y;
    int bdx=blockDim.x;
    // int bdy=blockDim.y;
    int gdx=gridDim.x;
    // int gdy=gridDim.y;
    int maxThreads=MaxBS*b_cols;
    for(int id = bix*bdx+tix; id < maxThreads; id+=gdx*bdx) {
        int cid=id/MaxBS;   // destination column
        int rid=id%MaxBS;   // packed word index within the column
        int Integer=0;
        int base=1;
        // Walk the column chunk backwards so its last element lands in bit 0.
        for (int i=0;i<BINSIZE;i++){
            if (b[((rid+1)*BINSIZE-1-i)*pitch_b+cid]==1.f){
                Integer+=base;
            }
            base=base<<1;
        }
        b_bin[rid*pitch_b_bin+cid]=Integer;
    }
}
// Byte-wise popcount lookup table; filled from the host via hipMemcpyToSymbol
// in MatrixMul_device before the matmul kernel runs.
__device__ unsigned char __popcount_tab_device[256];//__constant__ is slower than __device__
// Counts set bits in x with four table lookups, one per byte.
__device__ int popcount (int x) {
    return __popcount_tab_device[(x >> 0) & 0xff]
        + __popcount_tab_device[(x >> 8) & 0xff]
        + __popcount_tab_device[(x >> 16) & 0xff]
        + __popcount_tab_device[(x >> 24) & 0xff];
}
// Binary matmul on packed +/-1 matrices: result[bix][j] = dot(a_row(bix), b_col(j)).
// a^b has a 1 bit exactly where signs differ (product -1), so each word
// contributes posnum - negnum; `rest` subtracts the spurious +1 contributed by
// each of the (BINSIZE*a_cols - RealMidSize) zero-padding bits.
// NOTE(review): one block per output row -- requires gridDim.x >= a_rows
// (the caller launches 1000 blocks for a 1000-row matrix); confirm for reuse.
__global__ void MatrixMulXnor(int *a,int *b,int a_rows,int a_cols,
    int b_cols,float *result,int pitch_a,int pitch_b,
    int pitch_result,int BINSIZE,int RealMidSize){
    int tix=threadIdx.x;
    // int tiy=threadIdx.y;
    int bix=blockIdx.x;
    // int biy=blockIdx.y;
    int bdx=blockDim.x;
    // int bdy=blockDim.y;
    int gdx=gridDim.x;
    // int gdy=gridDim.y;
    int rest=(BINSIZE*a_cols-RealMidSize);   // number of padding bits per dot product
    for(int j=tix;j<b_cols;j+=bdx){
        // printf("i=%d ; j=%d\n",i,j);
        int sum=0;
        for(int k=0;k<a_cols;k++){
            int bin=(a[bix*pitch_a+k]^b[k*pitch_b+j]);
            int negnum=popcount(bin);       // differing-sign pairs (-1 products)
            int posnum=BINSIZE-negnum;      // matching-sign pairs (+1 products)
            //calculate ignores the rest of BINSIZE if the Matsize cant devided by BINSIZE ,it can cause err
            //(10/00)'(01/00) should be 0000 but it is 0011,so 1+1 is trash in the result.and it can cause a_rows*b_cols times.
            sum+=(posnum-negnum);
        }
        result[bix*pitch_result+j]=sum-rest;   // remove padding contribution
    }
}
// Binary (XNOR-style) GPU matmul: pads the inner dimension up to a multiple of
// BINSIZE, packs `a` row-wise and `b` column-wise into BINSIZE-bit words,
// multiplies the packed matrices with popcount arithmetic, and writes the
// a_rows x b_cols float product into `result`. All pointers are DEVICE memory
// (the Memcpy2D calls below copy device-to-device from the caller's buffers).
// Side effects: prints kernel timing to stdout; no HIP error checking.
void MatrixMul_device(float *a,float *b,int a_rows,int a_cols,int b_cols,float *result){
    int BINSIZE=30;//size of bin2int, 32 means 0000 0000 0000 0000 0000 0000 0000 0000
    int MaxBS=(a_cols-1)/BINSIZE+1;        // packed words per row/column (ceil div)
    int a_cols_Copysize=MaxBS*BINSIZE;     // inner dimension padded to a BINSIZE multiple
    dim3 BS_BIN(512,1,1);
    dim3 GS_BIN(6,1,1);

    // Zero-initialized padded staging copies of a and b (padding packs to 0 bits).
    float *a_device;//a_rows * a_cols_Copysize
    float *b_device;//a_cols_Copysize * b_cols
    size_t pitch_a_device, pitch_b_device;
    hipMallocPitch((void**)&a_device , &pitch_a_device , sizeof(float) *a_cols_Copysize , a_rows);
    hipMallocPitch((void**)&b_device , &pitch_b_device , sizeof(float) *b_cols , a_cols_Copysize);
    hipMemset(a_device, 0, pitch_a_device * a_rows);
    hipMemset(b_device, 0, pitch_b_device * a_cols_Copysize);
    hipMemcpy2D(a_device,pitch_a_device,a,sizeof(float) *a_cols ,sizeof(float) *a_cols, a_rows,hipMemcpyDeviceToDevice);
    hipMemcpy2D(b_device,pitch_b_device,b,sizeof(float) *b_cols ,sizeof(float) *b_cols, a_cols,hipMemcpyDeviceToDevice);

    //check oringin
    // float *a_host;
    // float *b_host;
    // a_host = (float*) malloc(sizeof(float) * a_cols_Copysize * a_rows);
    // b_host = (float*) malloc(sizeof(float) * b_cols * a_cols_Copysize);
    // hipMemcpy2D(a_host,sizeof(float) *a_cols_Copysize, a_device,pitch_a_device,sizeof(float) *a_cols_Copysize , a_rows,hipMemcpyDeviceToHost);
    // hipMemcpy2D(b_host,sizeof(float) *b_cols, b_device,pitch_b_device,sizeof(float) *b_cols , a_cols_Copysize,hipMemcpyDeviceToHost);
    // MatrixPrint(a_host,a_rows,a_cols_Copysize);
    // MatrixPrint(b_host,a_cols_Copysize,b_cols);

    // Pack both operands into int words (a horizontally, b vertically).
    int *a_device_bin;
    int *b_device_bin;
    size_t pitch_a_device_bin, pitch_b_device_bin;
    hipMallocPitch((void**)&a_device_bin , &pitch_a_device_bin , sizeof(int) *MaxBS , a_rows);
    hipMallocPitch((void**)&b_device_bin , &pitch_b_device_bin , sizeof(int) *b_cols , MaxBS);
    hipLaunchKernelGGL(( AMatrix2Bin), dim3(GS_BIN),dim3(BS_BIN), 0, 0, a_device , a_device_bin , a_rows ,
        pitch_a_device/sizeof(float) , pitch_a_device_bin/sizeof(int) , MaxBS , BINSIZE);
    hipLaunchKernelGGL(( BMatrix2Bin), dim3(GS_BIN),dim3(BS_BIN), 0, 0, b_device , b_device_bin , b_cols ,
        pitch_b_device/sizeof(float) , pitch_b_device_bin/sizeof(int) , MaxBS , BINSIZE);

    //check bin
    // int *a_host_bin;
    // int *b_host_bin;
    // a_host_bin = (int*) malloc(sizeof(int) *MaxBS * a_rows);
    // b_host_bin = (int*) malloc(sizeof(int) *b_cols * MaxBS);
    // hipMemcpy2D(a_host_bin,sizeof(int) *MaxBS, a_device_bin,pitch_a_device_bin,sizeof(int) *MaxBS , a_rows ,hipMemcpyDeviceToHost);
    // hipMemcpy2D(b_host_bin,sizeof(int) *b_cols, b_device_bin,pitch_b_device_bin,sizeof(int) *b_cols , MaxBS ,hipMemcpyDeviceToHost);
    // MatrixPrintD(a_host_bin,a_rows,MaxBS);
    // MatrixPrintD(b_host_bin,MaxBS,b_cols);

    float *result_device;//a_rows * b_cols
    size_t pitch_result_device;
    hipMallocPitch((void**)&result_device , &pitch_result_device , sizeof(float) *b_cols , a_rows);

    // Upload the byte-popcount lookup table used by the device popcount().
    const unsigned char __popcount_tab[] = {
        0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
        1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
        1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
        2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
        1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
        2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
        2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
        3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8,
    };
    hipMemcpyToSymbol(__popcount_tab_device, __popcount_tab, sizeof(__popcount_tab));

    // Time the packed multiply with HIP events.
    hipEvent_t start_device, stop_device;
    float time_device;
    hipEventCreate(&start_device);
    hipEventCreate(&stop_device);
    hipEventRecord( start_device, 0 );
    dim3 BS_MM(32,1,1);
    dim3 GS_MM(1000,1,1);   // one block per output row -- must cover a_rows
    hipLaunchKernelGGL(( MatrixMulXnor), dim3(GS_MM),dim3(BS_MM), 0, 0, a_device_bin , b_device_bin , a_rows , MaxBS , b_cols ,
        result_device , pitch_a_device_bin/sizeof(int) , pitch_b_device_bin/sizeof(int) ,
        pitch_result_device/sizeof(float) , BINSIZE , a_cols);
    hipEventRecord( stop_device, 0 );
    hipEventSynchronize( stop_device );
    hipEventElapsedTime( &time_device, start_device, stop_device );
    hipEventDestroy( start_device );
    hipEventDestroy( stop_device );
    cout<<"gputime="<<time_device<<"ms"<<endl;

    // Copy the (pitched) result into the caller's contiguous device buffer.
    hipMemcpy2D(result,sizeof(float) *b_cols, result_device,pitch_result_device,sizeof(float) *b_cols , a_rows ,hipMemcpyDeviceToDevice);
    hipFree(a_device);
    hipFree(b_device);
    hipFree(a_device_bin);
    hipFree(b_device_bin);
    hipFree(result_device);
}
// Driver: builds two random 1000x1000 +/-1 matrices, multiplies them on the
// GPU (packed XNOR path) and on the CPU (reference), and prints the L1 error
// between the two results (expected 0 when the packed path is correct).
int main(){
    //simulate pytorch param
    int Matrixsize=1000;
    float *a_host;
    float *b_host;
    float *result_host;
    a_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    b_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    result_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    srand(0);   // fixed seed for reproducible inputs
    MatrixRandBin(a_host,Matrixsize,Matrixsize);
    MatrixRandBin(b_host,Matrixsize,Matrixsize);
    // cout<<MatrixCopysize<<endl;
    // Device copies of the operands and the GPU result.
    float *a_device;
    float *b_device;
    float *result_device;
    hipMalloc((void**)&a_device,sizeof(float) *Matrixsize * Matrixsize);
    hipMalloc((void**)&b_device,sizeof(float) *Matrixsize * Matrixsize);
    hipMalloc((void**)&result_device,sizeof(float) *Matrixsize * Matrixsize);
    hipMemcpy(a_device,a_host,sizeof(float) *Matrixsize * Matrixsize,hipMemcpyHostToDevice);
    hipMemcpy(b_device,b_host,sizeof(float) *Matrixsize * Matrixsize,hipMemcpyHostToDevice);
    // MatrixPrint(a_host,Matrixsize,Matrixsize);
    // MatrixPrint(b_host,Matrixsize,Matrixsize);
    //run in gpu warp in C code
    MatrixMul_device(a_device,b_device,Matrixsize,Matrixsize,Matrixsize,result_device);
    hipMemcpy(result_host, result_device,sizeof(float) *Matrixsize * Matrixsize,hipMemcpyDeviceToHost);
    hipFree(a_device);
    hipFree(b_device);
    hipFree(result_device);
    // MatrixPrint(result_host,Matrixsize,Matrixsize);
    //run in cpu
    float *result_cpu;
    result_cpu = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    clock_t start_host = clock();
    MatrixMul_host(a_host,Matrixsize,Matrixsize,b_host,Matrixsize,Matrixsize,result_cpu);
    cout<<"cputime="<<(double)(clock() - start_host)/1000<<"ms"<<endl;
    // MatrixPrint(result_cpu,Matrixsize,Matrixsize);
    //compare value of gpu and cpu
    float err=MatrixCompare(result_cpu,result_host,Matrixsize,Matrixsize);
    cout<<"err in gpu and cpu = "<<err<<endl;
    return 0;
} | be9952c0ec2dc1d6ffdaf04475e34b2b60d0d356.cu | #include <iostream>
#include <stdio.h>
#include <iomanip>
#include <cuda_runtime.h>
using namespace std;
// Fills a rows x cols matrix with random +/-1 values, consuming exactly one
// rand() call per element in row-major order.
void MatrixRandBin(float *mat,int rows,int cols){
    int total = rows * cols;
    for (int idx = 0; idx < total; ++idx) {
        float r = (float)rand() / RAND_MAX;
        mat[idx] = (r > 0.5) ? 1.0f : -1.0f;
    }
}
// Prints a rows x cols float matrix to stdout, one row per line, followed by
// a blank line. Elements are width-2 padded and space separated.
void MatrixPrint(float *mat,int rows,int cols){
    for (int r = 0; r < rows; ++r) {
        const float *row = mat + r * cols;
        for (int c = 0; c < cols; ++c) {
            cout << setw(2) << row[c] << " ";
        }
        cout << endl;
    }
    cout << endl;
}
// Prints a rows x cols int matrix to stdout, one row per line, followed by a
// blank line. Elements are width-2 padded and space separated.
void MatrixPrintD(int *mat,int rows,int cols){
    for (int r = 0; r < rows; ++r) {
        const int *row = mat + r * cols;
        for (int c = 0; c < cols; ++c) {
            cout << setw(2) << row[c] << " ";
        }
        cout << endl;
    }
    cout << endl;
}
// Returns the sum of absolute element-wise differences (L1 distance) between
// two rows x cols matrices, accumulated in row-major order.
float MatrixCompare(float *a,float *b,int rows,int cols){
    float total = 0;
    int n = rows * cols;
    for (int idx = 0; idx < n; ++idx) {
        total += abs(a[idx] - b[idx]);
    }
    return total;
}
// CPU reference matrix multiply: c = a (a_rows x a_cols) * b (b_rows x b_cols),
// all matrices dense row-major. Assumes a_cols == b_rows; c is a_rows x b_cols.
void MatrixMul_host(float *a,int a_rows,int a_cols,float *b,int b_rows,int b_cols,float *c)
{
    for (int row = 0; row < a_rows; ++row) {
        for (int col = 0; col < b_cols; ++col) {
            float acc = 0;
            for (int k = 0; k < b_rows; ++k)
                acc += a[row * a_cols + k] * b[k * b_cols + col];
            c[row * b_cols + col] = acc;
        }
    }
}
// Horizontal packing: each run of BINSIZE consecutive row elements of `a`
// (values expected to be +1/-1, zero-padded past the real width) becomes one
// int in a_bin, with a bit set where the element equals +1.0f. The first
// element of a chunk ends up in the most significant of the BINSIZE bits.
// Grid-stride loop over all a_rows*MaxBS output words; pitches are in elements.
__global__ void AMatrix2Bin(float *a,int *a_bin,int a_rows,int pitch_a,int pitch_a_bin,int MaxBS,int BINSIZE){
    int tix=threadIdx.x;
    // int tiy=threadIdx.y;
    int bix=blockIdx.x;
    // int biy=blockIdx.y;
    int bdx=blockDim.x;
    // int bdy=blockDim.y;
    int gdx=gridDim.x;
    // int gdy=gridDim.y;
    int maxThreads=MaxBS*a_rows;
    for(int id = bix*bdx+tix; id < maxThreads; id+=gdx*bdx) {
        int rid=id/MaxBS;   // source row
        int cid=id%MaxBS;   // packed word index within the row
        int Integer=0;
        int base=1;
        // Walk the chunk backwards so the chunk's last element lands in bit 0.
        for (int i=0;i<BINSIZE;i++){
            if (a[rid*pitch_a+(cid+1)*BINSIZE-1-i]==1.f){
                Integer+=base;
            }
            base=base<<1;
        }
        a_bin[rid*pitch_a_bin+cid]=Integer;
    }
}
// Vertical packing: each run of BINSIZE consecutive COLUMN elements of `b`
// (values expected to be +1/-1, zero-padded past the real height) becomes one
// int in b_bin (MaxBS rows x b_cols), mirroring AMatrix2Bin's bit order.
// Grid-stride loop over all MaxBS*b_cols output words; pitches are in elements.
__global__ void BMatrix2Bin(float *b,int *b_bin,int b_cols,int pitch_b,int pitch_b_bin,int MaxBS,int BINSIZE){
    int tix=threadIdx.x;
    // int tiy=threadIdx.y;
    int bix=blockIdx.x;
    // int biy=blockIdx.y;
    int bdx=blockDim.x;
    // int bdy=blockDim.y;
    int gdx=gridDim.x;
    // int gdy=gridDim.y;
    int maxThreads=MaxBS*b_cols;
    for(int id = bix*bdx+tix; id < maxThreads; id+=gdx*bdx) {
        int cid=id/MaxBS;   // destination column
        int rid=id%MaxBS;   // packed word index within the column
        int Integer=0;
        int base=1;
        // Walk the column chunk backwards so its last element lands in bit 0.
        for (int i=0;i<BINSIZE;i++){
            if (b[((rid+1)*BINSIZE-1-i)*pitch_b+cid]==1.f){
                Integer+=base;
            }
            base=base<<1;
        }
        b_bin[rid*pitch_b_bin+cid]=Integer;
    }
}
// Byte-wise popcount lookup table; filled from the host via cudaMemcpyToSymbol
// in MatrixMul_device before the matmul kernel runs.
__device__ unsigned char __popcount_tab_device[256];//__constant__ is slower than __device__
// Counts set bits in x with four table lookups, one per byte.
__device__ int popcount (int x) {
    return __popcount_tab_device[(x >> 0) & 0xff]
        + __popcount_tab_device[(x >> 8) & 0xff]
        + __popcount_tab_device[(x >> 16) & 0xff]
        + __popcount_tab_device[(x >> 24) & 0xff];
}
// Binary matmul on packed +/-1 matrices: result[bix][j] = dot(a_row(bix), b_col(j)).
// a^b has a 1 bit exactly where signs differ (product -1), so each word
// contributes posnum - negnum; `rest` subtracts the spurious +1 contributed by
// each of the (BINSIZE*a_cols - RealMidSize) zero-padding bits.
// NOTE(review): one block per output row -- requires gridDim.x >= a_rows
// (the caller launches 1000 blocks for a 1000-row matrix); confirm for reuse.
__global__ void MatrixMulXnor(int *a,int *b,int a_rows,int a_cols,
    int b_cols,float *result,int pitch_a,int pitch_b,
    int pitch_result,int BINSIZE,int RealMidSize){
    int tix=threadIdx.x;
    // int tiy=threadIdx.y;
    int bix=blockIdx.x;
    // int biy=blockIdx.y;
    int bdx=blockDim.x;
    // int bdy=blockDim.y;
    int gdx=gridDim.x;
    // int gdy=gridDim.y;
    int rest=(BINSIZE*a_cols-RealMidSize);   // number of padding bits per dot product
    for(int j=tix;j<b_cols;j+=bdx){
        // printf("i=%d ; j=%d\n",i,j);
        int sum=0;
        for(int k=0;k<a_cols;k++){
            int bin=(a[bix*pitch_a+k]^b[k*pitch_b+j]);
            int negnum=popcount(bin);       // differing-sign pairs (-1 products)
            int posnum=BINSIZE-negnum;      // matching-sign pairs (+1 products)
            //calculate ignores the rest of BINSIZE if the Matsize cant devided by BINSIZE ,it can cause err
            //(10/00)'(01/00) should be 0000 but it is 0011,so 1+1 is trash in the result.and it can cause a_rows*b_cols times.
            sum+=(posnum-negnum);
        }
        result[bix*pitch_result+j]=sum-rest;   // remove padding contribution
    }
}
// Binary (XNOR-style) GPU matmul: pads the inner dimension up to a multiple of
// BINSIZE, packs `a` row-wise and `b` column-wise into BINSIZE-bit words,
// multiplies the packed matrices with popcount arithmetic, and writes the
// a_rows x b_cols float product into `result`. All pointers are DEVICE memory
// (the Memcpy2D calls below copy device-to-device from the caller's buffers).
// Side effects: prints kernel timing to stdout; no CUDA error checking.
void MatrixMul_device(float *a,float *b,int a_rows,int a_cols,int b_cols,float *result){
    int BINSIZE=30;//size of bin2int, 32 means 0000 0000 0000 0000 0000 0000 0000 0000
    int MaxBS=(a_cols-1)/BINSIZE+1;        // packed words per row/column (ceil div)
    int a_cols_Copysize=MaxBS*BINSIZE;     // inner dimension padded to a BINSIZE multiple
    dim3 BS_BIN(512,1,1);
    dim3 GS_BIN(6,1,1);

    // Zero-initialized padded staging copies of a and b (padding packs to 0 bits).
    float *a_device;//a_rows * a_cols_Copysize
    float *b_device;//a_cols_Copysize * b_cols
    size_t pitch_a_device, pitch_b_device;
    cudaMallocPitch((void**)&a_device , &pitch_a_device , sizeof(float) *a_cols_Copysize , a_rows);
    cudaMallocPitch((void**)&b_device , &pitch_b_device , sizeof(float) *b_cols , a_cols_Copysize);
    cudaMemset(a_device, 0, pitch_a_device * a_rows);
    cudaMemset(b_device, 0, pitch_b_device * a_cols_Copysize);
    cudaMemcpy2D(a_device,pitch_a_device,a,sizeof(float) *a_cols ,sizeof(float) *a_cols, a_rows,cudaMemcpyDeviceToDevice);
    cudaMemcpy2D(b_device,pitch_b_device,b,sizeof(float) *b_cols ,sizeof(float) *b_cols, a_cols,cudaMemcpyDeviceToDevice);

    //check oringin
    // float *a_host;
    // float *b_host;
    // a_host = (float*) malloc(sizeof(float) * a_cols_Copysize * a_rows);
    // b_host = (float*) malloc(sizeof(float) * b_cols * a_cols_Copysize);
    // cudaMemcpy2D(a_host,sizeof(float) *a_cols_Copysize, a_device,pitch_a_device,sizeof(float) *a_cols_Copysize , a_rows,cudaMemcpyDeviceToHost);
    // cudaMemcpy2D(b_host,sizeof(float) *b_cols, b_device,pitch_b_device,sizeof(float) *b_cols , a_cols_Copysize,cudaMemcpyDeviceToHost);
    // MatrixPrint(a_host,a_rows,a_cols_Copysize);
    // MatrixPrint(b_host,a_cols_Copysize,b_cols);

    // Pack both operands into int words (a horizontally, b vertically).
    int *a_device_bin;
    int *b_device_bin;
    size_t pitch_a_device_bin, pitch_b_device_bin;
    cudaMallocPitch((void**)&a_device_bin , &pitch_a_device_bin , sizeof(int) *MaxBS , a_rows);
    cudaMallocPitch((void**)&b_device_bin , &pitch_b_device_bin , sizeof(int) *b_cols , MaxBS);
    AMatrix2Bin<<<GS_BIN,BS_BIN>>>(a_device , a_device_bin , a_rows ,
        pitch_a_device/sizeof(float) , pitch_a_device_bin/sizeof(int) , MaxBS , BINSIZE);
    BMatrix2Bin<<<GS_BIN,BS_BIN>>>(b_device , b_device_bin , b_cols ,
        pitch_b_device/sizeof(float) , pitch_b_device_bin/sizeof(int) , MaxBS , BINSIZE);

    //check bin
    // int *a_host_bin;
    // int *b_host_bin;
    // a_host_bin = (int*) malloc(sizeof(int) *MaxBS * a_rows);
    // b_host_bin = (int*) malloc(sizeof(int) *b_cols * MaxBS);
    // cudaMemcpy2D(a_host_bin,sizeof(int) *MaxBS, a_device_bin,pitch_a_device_bin,sizeof(int) *MaxBS , a_rows ,cudaMemcpyDeviceToHost);
    // cudaMemcpy2D(b_host_bin,sizeof(int) *b_cols, b_device_bin,pitch_b_device_bin,sizeof(int) *b_cols , MaxBS ,cudaMemcpyDeviceToHost);
    // MatrixPrintD(a_host_bin,a_rows,MaxBS);
    // MatrixPrintD(b_host_bin,MaxBS,b_cols);

    float *result_device;//a_rows * b_cols
    size_t pitch_result_device;
    cudaMallocPitch((void**)&result_device , &pitch_result_device , sizeof(float) *b_cols , a_rows);

    // Upload the byte-popcount lookup table used by the device popcount().
    const unsigned char __popcount_tab[] = {
        0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
        1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
        1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
        2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
        1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
        2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
        2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
        3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8,
    };
    cudaMemcpyToSymbol(__popcount_tab_device, __popcount_tab, sizeof(__popcount_tab));

    // Time the packed multiply with CUDA events.
    cudaEvent_t start_device, stop_device;
    float time_device;
    cudaEventCreate(&start_device);
    cudaEventCreate(&stop_device);
    cudaEventRecord( start_device, 0 );
    dim3 BS_MM(32,1,1);
    dim3 GS_MM(1000,1,1);   // one block per output row -- must cover a_rows
    MatrixMulXnor<<<GS_MM,BS_MM>>>(a_device_bin , b_device_bin , a_rows , MaxBS , b_cols ,
        result_device , pitch_a_device_bin/sizeof(int) , pitch_b_device_bin/sizeof(int) ,
        pitch_result_device/sizeof(float) , BINSIZE , a_cols);
    cudaEventRecord( stop_device, 0 );
    cudaEventSynchronize( stop_device );
    cudaEventElapsedTime( &time_device, start_device, stop_device );
    cudaEventDestroy( start_device );
    cudaEventDestroy( stop_device );
    cout<<"gputime="<<time_device<<"ms"<<endl;

    // Copy the (pitched) result into the caller's contiguous device buffer.
    cudaMemcpy2D(result,sizeof(float) *b_cols, result_device,pitch_result_device,sizeof(float) *b_cols , a_rows ,cudaMemcpyDeviceToDevice);
    cudaFree(a_device);
    cudaFree(b_device);
    cudaFree(a_device_bin);
    cudaFree(b_device_bin);
    cudaFree(result_device);
}
// Driver: builds two random 1000x1000 +/-1 matrices, multiplies them on the
// GPU (packed XNOR path) and on the CPU (reference), and prints the L1 error
// between the two results (expected 0 when the packed path is correct).
int main(){
    //simulate pytorch param
    int Matrixsize=1000;
    float *a_host;
    float *b_host;
    float *result_host;
    a_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    b_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    result_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    srand(0);   // fixed seed for reproducible inputs
    MatrixRandBin(a_host,Matrixsize,Matrixsize);
    MatrixRandBin(b_host,Matrixsize,Matrixsize);
    // cout<<MatrixCopysize<<endl;
    // Device copies of the operands and the GPU result.
    float *a_device;
    float *b_device;
    float *result_device;
    cudaMalloc((void**)&a_device,sizeof(float) *Matrixsize * Matrixsize);
    cudaMalloc((void**)&b_device,sizeof(float) *Matrixsize * Matrixsize);
    cudaMalloc((void**)&result_device,sizeof(float) *Matrixsize * Matrixsize);
    cudaMemcpy(a_device,a_host,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyHostToDevice);
    cudaMemcpy(b_device,b_host,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyHostToDevice);
    // MatrixPrint(a_host,Matrixsize,Matrixsize);
    // MatrixPrint(b_host,Matrixsize,Matrixsize);
    //run in gpu warp in C code
    MatrixMul_device(a_device,b_device,Matrixsize,Matrixsize,Matrixsize,result_device);
    cudaMemcpy(result_host, result_device,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyDeviceToHost);
    cudaFree(a_device);
    cudaFree(b_device);
    cudaFree(result_device);
    // MatrixPrint(result_host,Matrixsize,Matrixsize);
    //run in cpu
    float *result_cpu;
    result_cpu = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    clock_t start_host = clock();
    MatrixMul_host(a_host,Matrixsize,Matrixsize,b_host,Matrixsize,Matrixsize,result_cpu);
    cout<<"cputime="<<(double)(clock() - start_host)/1000<<"ms"<<endl;
    // MatrixPrint(result_cpu,Matrixsize,Matrixsize);
    //compare value of gpu and cpu
    float err=MatrixCompare(result_cpu,result_host,Matrixsize,Matrixsize);
    cout<<"err in gpu and cpu = "<<err<<endl;
    return 0;
} |
424c1c70886b93c39306e3428740acdf928b2419.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#ifdef USE_ROCM
#include "thrust/device_vector.h"
#endif
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea_im2col.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_ROCM
// For each (n, s) position, scan the channel axis and record its maximum.
// One work-item per spatial position per sample; out has num*spatial_dim entries.
template<typename Dtype>
__global__ void kernel_channel_max(const int_tp num, const int_tp channels,
                                   const int_tp spatial_dim, const Dtype* data,
                                   Dtype* out) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int_tp n = index / spatial_dim;
    const int_tp s = index - n * spatial_dim;
    Dtype best = -FLT_MAX;
    for (int_tp c = 0; c < channels; ++c) {
      const Dtype v = data[(n * channels + c) * spatial_dim + s];
      best = max(v, best);
    }
    out[index] = best;
  }
}
// Subtract the per-(n, s) channel maximum from every element of data
// (numerical stabilization before exponentiation).
template<typename Dtype>
__global__ void kernel_channel_subtract(const int_tp count, const int_tp num,
                                        const int_tp channels,
                                        const int_tp spatial_dim,
                                        const Dtype* channel_max, Dtype* data) {
  CUDA_KERNEL_LOOP(index, count) {
    const int_tp n = index / (channels * spatial_dim);
    const int_tp s = index % spatial_dim;
    data[index] = data[index] - channel_max[n * spatial_dim + s];
  }
}
// Elementwise exponential; safe to call in place (data == out).
template<typename Dtype>
__global__ void kernel_exp(const int_tp count, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(index, count) {
    const Dtype v = data[index];
    out[index] = exp(v);
  }
}
// For each (n, s) position, accumulate data along the channel axis into
// channel_sum (the softmax normalizer after exponentiation).
template<typename Dtype>
__global__ void kernel_channel_sum(const int_tp num, const int_tp channels,
                                   const int_tp spatial_dim, const Dtype* data,
                                   Dtype* channel_sum) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int_tp n = index / spatial_dim;
    const int_tp s = index - n * spatial_dim;
    Dtype acc = 0;
    for (int_tp c = 0; c < channels; ++c) {
      acc += data[(n * channels + c) * spatial_dim + s];
    }
    channel_sum[index] = acc;
  }
}
// Divide every element by its (n, s) channel sum, completing the softmax.
template<typename Dtype>
__global__ void kernel_channel_div(const int_tp count, const int_tp num,
                                   const int_tp channels,
                                   const int_tp spatial_dim,
                                   const Dtype* channel_sum, Dtype* data) {
  CUDA_KERNEL_LOOP(index, count) {
    const int_tp n = index / (channels * spatial_dim);
    const int_tp s = index % spatial_dim;
    data[index] = data[index] / channel_sum[n * spatial_dim + s];
  }
}
// For each (n, s) position, compute the dot product of data_1 and data_2
// along the channel axis (used by the softmax backward pass).
template<typename Dtype>
__global__ void kernel_channel_dot(const int_tp num, const int_tp channels,
                                   const int_tp spatial_dim,
                                   const Dtype* data_1, const Dtype* data_2,
                                   Dtype* channel_dot) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int_tp n = index / spatial_dim;
    const int_tp s = index - n * spatial_dim;
    Dtype acc = 0;
    for (int_tp c = 0; c < channels; ++c) {
      const int_tp at = (n * channels + c) * spatial_dim + s;
      acc += data_1[at] * data_2[at];
    }
    channel_dot[index] = acc;
  }
}
#endif
// Forward pass: channel-wise softmax along softmax_axis_.
// Pipeline (per (outer, inner) position): max -> subtract -> exp -> sum -> div.
// Dispatches to the CUDA/ROCm kernel path or the GreenTea (OpenCL/ViennaCL)
// path depending on the device backend.
template<typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();  // scratch for per-position max/sum
int_tp count = bottom[0]->count();
int_tp channels = top[0]->shape(softmax_axis_);
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// CUDA backend code
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype> CUDA_KERNEL(
CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype> CUDA_KERNEL(
CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(count, top_data,
top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> CUDA_KERNEL(
CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS)(outer_num_, channels,
inner_num_, top_data, scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_,
scale_data, top_data);
#endif
} else {
#ifdef USE_GREENTEA
// NOTE(review): the empty-batch early-out exists only on this OpenCL path,
// not on the CUDA path above — presumably CAFFE_GET_BLOCKS(0) is harmless
// there; confirm.
if (outer_num_ == 0)
return;
viennacl::ocl::context &ctx = viennacl::ocl::get_context
(this->device_->id());
// Fast path: a single fused OpenCL kernel when Intel subgroup extensions
// are available and the spatial dimension is small enough.
if (this->device_->CheckCapability("cl_intel_subgroups")
&& inner_num_ < 128) {
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel *oclk_softmax_forward_kernel;
// use_slm_ selects the shared-local-memory variant of the fused kernel.
if (use_slm_)
oclk_softmax_forward_kernel = &program.get_kernel(
CL_KERNEL_SELECT("softmax_forward_slm"));
else
oclk_softmax_forward_kernel = &program.get_kernel(
CL_KERNEL_SELECT("softmax_forward"));
// One 256-lane work-group per outer index.
oclk_softmax_forward_kernel->local_work_size(0, 256);
oclk_softmax_forward_kernel->local_work_size(1, 1);
oclk_softmax_forward_kernel->local_work_size(2, 1);
oclk_softmax_forward_kernel->global_work_size(0, 256);
oclk_softmax_forward_kernel->global_work_size(1, outer_num_);
oclk_softmax_forward_kernel->global_work_size(2, 1);
if (use_slm_) {
// SLM variant needs local scratch buffers passed as kernel arguments.
viennacl::ocl::local_mem data_tmp(channels * inner_num_ *
sizeof(Dtype));
viennacl::ocl::local_mem scale_tmp(inner_num_ * sizeof(Dtype));
viennacl::ocl::local_mem group_tmp(16 * inner_num_ * sizeof(Dtype));
viennacl::ocl::enqueue(
(*oclk_softmax_forward_kernel)(outer_num_, channels, inner_num_,
WrapHandle((cl_mem) scale_data, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx),
data_tmp, scale_tmp, group_tmp),
ctx.get_queue());
} else {
viennacl::ocl::enqueue(
(*oclk_softmax_forward_kernel)(outer_num_, channels, inner_num_,
WrapHandle((cl_mem) scale_data, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
}
} else {
// Generic OpenCL path: mirror the five-kernel CUDA pipeline.
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
viennacl::ocl::kernel &oclk_channel_max = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_max"));
viennacl::ocl::enqueue(
oclk_channel_max(outer_num_, channels, inner_num_,
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) scale_data, &ctx)),
ctx.get_queue())
;
viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_subtract"));
viennacl::ocl::enqueue(
oclk_channel_subtract(count, outer_num_, channels, inner_num_,
WrapHandle((cl_mem) scale_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
viennacl::ocl::kernel &oclk_exp = program.get_kernel(
CL_KERNEL_SELECT("kernel_exp"));
viennacl::ocl::enqueue(
oclk_exp(count,
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_sum"));
viennacl::ocl::enqueue(
oclk_channel_sum(outer_num_, channels, inner_num_,
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) scale_data, &ctx)),
ctx.get_queue());
viennacl::ocl::kernel &oclk_channel_div = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_div"));
viennacl::ocl::enqueue(
oclk_channel_div(count, outer_num_, channels, inner_num_,
WrapHandle((cl_mem) scale_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
}
#endif
}
}
// Backward pass: bottom_diff = (top_diff - dot(top_diff, top_data)) * top_data,
// where the dot product is taken along the softmax channel axis per
// (outer, inner) position — the standard softmax Jacobian-vector product.
template<typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();  // scratch for per-position dots
int_tp count = top[0]->count();
int_tp channels = top[0]->shape(softmax_axis_);
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_copy(top[0]->count(), top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and
// subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> CUDA_KERNEL(
CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
#endif
} else {
#ifdef USE_GREENTEA
// OpenCL path: same three steps (dot, subtract, elementwise multiply).
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
greentea_copy<Dtype>(top[0]->count(), (cl_mem)top_diff,
0, (cl_mem)bottom_diff, 0, &ctx);
viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_dot"));
viennacl::ocl::enqueue(
oclk_channel_dot(outer_num_, channels, inner_num_,
WrapHandle((cl_mem)top_diff, &ctx),
WrapHandle((cl_mem)top_data, &ctx),
WrapHandle((cl_mem)scale_data, &ctx)),
ctx.get_queue());
viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_subtract"));
viennacl::ocl::enqueue(
oclk_channel_subtract(count, outer_num_, channels, inner_num_,
WrapHandle((cl_mem)scale_data, &ctx),
WrapHandle((cl_mem)bottom_diff, &ctx)),
ctx.get_queue());
greentea_gpu_mul<Dtype>(this->device_->id(), top[0]->count(),
(cl_mem)bottom_diff, 0,
(cl_mem)top_data, 0, (cl_mem)bottom_diff, 0);
#endif
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
| 424c1c70886b93c39306e3428740acdf928b2419.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#ifdef USE_CUDA
#include "thrust/device_vector.h"
#endif
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea_im2col.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_CUDA
// For each (n, s) position, scan the channel axis and record its maximum.
// One work-item per spatial position per sample; out has num*spatial_dim entries.
template<typename Dtype>
__global__ void kernel_channel_max(const int_tp num, const int_tp channels,
                                   const int_tp spatial_dim, const Dtype* data,
                                   Dtype* out) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int_tp n = index / spatial_dim;
    const int_tp s = index - n * spatial_dim;
    Dtype best = -FLT_MAX;
    for (int_tp c = 0; c < channels; ++c) {
      const Dtype v = data[(n * channels + c) * spatial_dim + s];
      best = max(v, best);
    }
    out[index] = best;
  }
}
// Subtract the per-(n, s) channel maximum from every element of data
// (numerical stabilization before exponentiation).
template<typename Dtype>
__global__ void kernel_channel_subtract(const int_tp count, const int_tp num,
                                        const int_tp channels,
                                        const int_tp spatial_dim,
                                        const Dtype* channel_max, Dtype* data) {
  CUDA_KERNEL_LOOP(index, count) {
    const int_tp n = index / (channels * spatial_dim);
    const int_tp s = index % spatial_dim;
    data[index] = data[index] - channel_max[n * spatial_dim + s];
  }
}
// Elementwise exponential; safe to call in place (data == out).
template<typename Dtype>
__global__ void kernel_exp(const int_tp count, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(index, count) {
    const Dtype v = data[index];
    out[index] = exp(v);
  }
}
// For each (n, s) position, accumulate data along the channel axis into
// channel_sum (the softmax normalizer after exponentiation).
template<typename Dtype>
__global__ void kernel_channel_sum(const int_tp num, const int_tp channels,
                                   const int_tp spatial_dim, const Dtype* data,
                                   Dtype* channel_sum) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int_tp n = index / spatial_dim;
    const int_tp s = index - n * spatial_dim;
    Dtype acc = 0;
    for (int_tp c = 0; c < channels; ++c) {
      acc += data[(n * channels + c) * spatial_dim + s];
    }
    channel_sum[index] = acc;
  }
}
// Divide every element by its (n, s) channel sum, completing the softmax.
template<typename Dtype>
__global__ void kernel_channel_div(const int_tp count, const int_tp num,
                                   const int_tp channels,
                                   const int_tp spatial_dim,
                                   const Dtype* channel_sum, Dtype* data) {
  CUDA_KERNEL_LOOP(index, count) {
    const int_tp n = index / (channels * spatial_dim);
    const int_tp s = index % spatial_dim;
    data[index] = data[index] / channel_sum[n * spatial_dim + s];
  }
}
// For each (n, s) position, compute the dot product of data_1 and data_2
// along the channel axis (used by the softmax backward pass).
template<typename Dtype>
__global__ void kernel_channel_dot(const int_tp num, const int_tp channels,
                                   const int_tp spatial_dim,
                                   const Dtype* data_1, const Dtype* data_2,
                                   Dtype* channel_dot) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int_tp n = index / spatial_dim;
    const int_tp s = index - n * spatial_dim;
    Dtype acc = 0;
    for (int_tp c = 0; c < channels; ++c) {
      const int_tp at = (n * channels + c) * spatial_dim + s;
      acc += data_1[at] * data_2[at];
    }
    channel_dot[index] = acc;
  }
}
#endif
// Forward pass: channel-wise softmax along softmax_axis_.
// Pipeline (per (outer, inner) position): max -> subtract -> exp -> sum -> div.
// Dispatches to the CUDA kernel path or the GreenTea (OpenCL/ViennaCL)
// path depending on the device backend.
template<typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();  // scratch for per-position max/sum
int_tp count = bottom[0]->count();
int_tp channels = top[0]->shape(softmax_axis_);
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// CUDA backend code
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype> CUDA_KERNEL(
CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype> CUDA_KERNEL(
CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(count, top_data,
top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> CUDA_KERNEL(
CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS)(outer_num_, channels,
inner_num_, top_data, scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_,
scale_data, top_data);
#endif
} else {
#ifdef USE_GREENTEA
// NOTE(review): the empty-batch early-out exists only on this OpenCL path,
// not on the CUDA path above — presumably CAFFE_GET_BLOCKS(0) is harmless
// there; confirm.
if (outer_num_ == 0)
return;
viennacl::ocl::context &ctx = viennacl::ocl::get_context
(this->device_->id());
// Fast path: a single fused OpenCL kernel when Intel subgroup extensions
// are available and the spatial dimension is small enough.
if (this->device_->CheckCapability("cl_intel_subgroups")
&& inner_num_ < 128) {
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel *oclk_softmax_forward_kernel;
// use_slm_ selects the shared-local-memory variant of the fused kernel.
if (use_slm_)
oclk_softmax_forward_kernel = &program.get_kernel(
CL_KERNEL_SELECT("softmax_forward_slm"));
else
oclk_softmax_forward_kernel = &program.get_kernel(
CL_KERNEL_SELECT("softmax_forward"));
// One 256-lane work-group per outer index.
oclk_softmax_forward_kernel->local_work_size(0, 256);
oclk_softmax_forward_kernel->local_work_size(1, 1);
oclk_softmax_forward_kernel->local_work_size(2, 1);
oclk_softmax_forward_kernel->global_work_size(0, 256);
oclk_softmax_forward_kernel->global_work_size(1, outer_num_);
oclk_softmax_forward_kernel->global_work_size(2, 1);
if (use_slm_) {
// SLM variant needs local scratch buffers passed as kernel arguments.
viennacl::ocl::local_mem data_tmp(channels * inner_num_ *
sizeof(Dtype));
viennacl::ocl::local_mem scale_tmp(inner_num_ * sizeof(Dtype));
viennacl::ocl::local_mem group_tmp(16 * inner_num_ * sizeof(Dtype));
viennacl::ocl::enqueue(
(*oclk_softmax_forward_kernel)(outer_num_, channels, inner_num_,
WrapHandle((cl_mem) scale_data, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx),
data_tmp, scale_tmp, group_tmp),
ctx.get_queue());
} else {
viennacl::ocl::enqueue(
(*oclk_softmax_forward_kernel)(outer_num_, channels, inner_num_,
WrapHandle((cl_mem) scale_data, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
}
} else {
// Generic OpenCL path: mirror the five-kernel CUDA pipeline.
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
viennacl::ocl::kernel &oclk_channel_max = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_max"));
viennacl::ocl::enqueue(
oclk_channel_max(outer_num_, channels, inner_num_,
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) scale_data, &ctx)),
ctx.get_queue());
viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_subtract"));
viennacl::ocl::enqueue(
oclk_channel_subtract(count, outer_num_, channels, inner_num_,
WrapHandle((cl_mem) scale_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
viennacl::ocl::kernel &oclk_exp = program.get_kernel(
CL_KERNEL_SELECT("kernel_exp"));
viennacl::ocl::enqueue(
oclk_exp(count,
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_sum"));
viennacl::ocl::enqueue(
oclk_channel_sum(outer_num_, channels, inner_num_,
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) scale_data, &ctx)),
ctx.get_queue());
viennacl::ocl::kernel &oclk_channel_div = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_div"));
viennacl::ocl::enqueue(
oclk_channel_div(count, outer_num_, channels, inner_num_,
WrapHandle((cl_mem) scale_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
}
#endif
}
}
// Backward pass: bottom_diff = (top_diff - dot(top_diff, top_data)) * top_data,
// where the dot product is taken along the softmax channel axis per
// (outer, inner) position — the standard softmax Jacobian-vector product.
template<typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();  // scratch for per-position dots
int_tp count = top[0]->count();
int_tp channels = top[0]->shape(softmax_axis_);
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_copy(top[0]->count(), top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and
// subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> CUDA_KERNEL(
CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
#endif
} else {
#ifdef USE_GREENTEA
// OpenCL path: same three steps (dot, subtract, elementwise multiply).
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
greentea_copy<Dtype>(top[0]->count(), (cl_mem)top_diff,
0, (cl_mem)bottom_diff, 0, &ctx);
viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_dot"));
viennacl::ocl::enqueue(
oclk_channel_dot(outer_num_, channels, inner_num_,
WrapHandle((cl_mem)top_diff, &ctx),
WrapHandle((cl_mem)top_data, &ctx),
WrapHandle((cl_mem)scale_data, &ctx)),
ctx.get_queue());
viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel(
CL_KERNEL_SELECT("kernel_channel_subtract"));
viennacl::ocl::enqueue(
oclk_channel_subtract(count, outer_num_, channels, inner_num_,
WrapHandle((cl_mem)scale_data, &ctx),
WrapHandle((cl_mem)bottom_diff, &ctx)),
ctx.get_queue());
greentea_gpu_mul<Dtype>(this->device_->id(), top[0]->count(),
(cl_mem)bottom_diff, 0,
(cl_mem)top_data, 0, (cl_mem)bottom_diff, 0);
#endif
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
|
fe2681b867a593a86abc188f4f6715bbedd5978c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* CUDA implementation of FGP-TV [1] denoising/regularization model (2D/3D case)
*
* Input Parameters:
* 1. Noisy image/volume
* 2. lambdaPar - regularization parameter
* 3. Number of iterations
* 4. eplsilon: tolerance constant
* 5. TV-type: methodTV - 'iso' (0) or 'l1' (1)
* 6. nonneg: 'nonnegativity (0 is OFF by default)
*
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the Matlab's code and paper by
* [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems"
*/
//
// tv_fgp.cu
//
// Created by Hovden Group on 9/1/20.
// Adapted from CCPI-Regularization-Toolkit
//
#include "tv_fgp.h"
#include "shared.h"
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform_reduce.h>
#include <cmath>
#include <stdio.h>
#define BLKXSIZE 8
#define MAX(x,y) (x>y?x:y)
#define MIN(x,y) (x<y?x:y)
#define ABS(x) (x>0?x:-x)
#define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
// Primal update of FGP-TV: D = Ad - lambda * div(R), where the discrete
// divergence is built from backward differences of the dual fields R1..R3
// (zero contribution on the i/j/k == 0 boundaries).
__global__ void Obj_func3D_kernel(float *Ad, float *D, float *R1, float *R2, float *R3, int N, int M, int Z, int ImSize, float lambda)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= N || j >= M || k >= Z) return;
    int index = (Z * M) * i + Z * j + k;
    // Backward neighbors along each axis; strides are Z*M, Z, and 1.
    float back1 = (i > 0) ? R1[index - Z * M] : 0.0f;
    float back2 = (j > 0) ? R2[index - Z] : 0.0f;
    float back3 = (k > 0) ? R3[index - 1] : 0.0f;
    D[index] = Ad[index]
             - lambda * (R1[index] + R2[index] + R3[index] - back1 - back2 - back3);
}
// Dual ascent step of FGP-TV: P += multip * forward_diff(D), with zero
// forward differences on the far boundaries (i==N-1, j==M-1, k==Z-1).
__global__ void Grad_func3D_kernel(float *P1, float *P2, float *P3, float *D, int N, int M, int Z, int ImSize, float multip)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= N || j >= M || k >= Z) return;
    int index = (Z * M) * i + Z * j + k;
    float d = D[index];
    // Forward neighbors along each axis; strides are Z*M, Z, and 1.
    float fwd1 = (i < N - 1) ? d - D[index + Z * M] : 0.0f;
    float fwd2 = (j < M - 1) ? d - D[index + Z] : 0.0f;
    float fwd3 = (k < Z - 1) ? d - D[index + 1] : 0.0f;
    P1[index] += multip * fwd1;
    P2[index] += multip * fwd2;
    P3[index] += multip * fwd3;
}
/* Isotropic projection of the dual field (P1,P2,P3) onto the unit ball:
 * when |P|^2 > 1 the vector is rescaled to unit length, otherwise left alone.
 * Fix: use plain multiplications and sqrtf() instead of the original
 * double-precision pow(x,2)/sqrt(), which forced needless float->double
 * round trips in a float kernel. */
__global__ void Proj_func3D_iso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (Z*M)*i + Z*j + k;
    if ((i < N) && (j < M) && (k < Z)) {
        float p1 = P1[index];
        float p2 = P2[index];
        float p3 = P3[index];
        float denom = p1 * p1 + p2 * p2 + p3 * p3;  // squared magnitude
        if (denom > 1.0f) {
            float sq_denom = 1.0f / sqrtf(denom);
            P1[index] = p1 * sq_denom;
            P2[index] = p2 * sq_denom;
            P3[index] = p3 * sq_denom;
        }
    }
    return;
}
/* Anisotropic projection: clamp each dual component independently to [-1, 1]
 * by dividing it by max(|P|, 1).
 * Fix: use fabsf() instead of abs() — the C abs() overload takes int, so if
 * the <cmath> float overload is not in scope the original silently truncated
 * the float argument. */
__global__ void Proj_func3D_aniso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize)
{
    float val1, val2, val3;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (Z*M)*i + Z*j + k;
    if ((i < N) && (j < M) && (k < Z)) {
        val1 = fabsf(P1[index]);
        val2 = fabsf(P2[index]);
        val3 = fabsf(P3[index]);
        // Divisor is at least 1 so in-range components pass through unchanged.
        if (val1 < 1.0f) {val1 = 1.0f;}
        if (val2 < 1.0f) {val2 = 1.0f;}
        if (val3 < 1.0f) {val3 = 1.0f;}
        P1[index] = P1[index]/val1;
        P2[index] = P2[index]/val2;
        P3[index] = P3[index]/val3;
    }
    return;
}
/* Clamp negative voxels of Output to zero (nonnegativity constraint).
 * Fix: the original only checked the flattened index against num_total, so
 * with a padded 3D grid a thread whose j or k exceeded the volume extent
 * computed an index that aliases a *different* in-range voxel — several
 * threads then touched the same element.  Harmless for this idempotent
 * clamp, but wrong in general; guard each axis explicitly instead. */
__global__ void nonneg3D_kernel(float* Output, int N, int M, int Z, int num_total)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if ((i < N) && (j < M) && (k < Z)) {
        int index = (Z*M)*i + Z*j + k;
        if (Output[index] < 0.0f) Output[index] = 0.0f;
    }
}
/* Elementwise residual: Output = Input1 - Input2.
 * Fix: replace the flattened-index-only bounds check with per-axis guards —
 * padded-grid threads with out-of-range j/k otherwise alias in-range voxels
 * and write them redundantly (see nonneg3D_kernel). */
__global__ void FGPResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if ((i < N) && (j < M) && (k < Z)) {
        int index = (Z*M)*i + Z*j + k;
        Output[index] = Input1[index] - Input2[index];
    }
}
//Measure Reconstruction's TV.
/* Per-voxel isotropic TV integrand with periodic (wrap-around) forward
 * differences: tv_recon[ijk] = sqrt(eps + |grad vol|^2).  eps keeps the
 * value finite where the volume is flat.  The host sums tv_recon to get
 * the scalar TV measure.
 * Fix: float literal (1e-6f) and sqrtf() to avoid implicit promotion of
 * the whole expression to double. */
__global__ void tv_kernel_3D(float *vol, float *tv_recon, int nx, int ny, int nz)
{
    const float eps = 1e-6f;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if ((i < nx) && (j < ny) && (k < nz)) {
        int ijk = (ny*nz)*i + nz*j + k;
        // Periodic neighbors along each axis.
        int ip = (ny*nz)*((i+1)%nx) + nz*j + k;
        int jp = (ny*nz)*i + nz*((j+1)%ny) + k;
        int kp = (ny*nz)*i + nz*j + ((k+1)%nz);
        float dx = vol[ijk] - vol[ip];
        float dy = vol[ijk] - vol[jp];
        float dz = vol[ijk] - vol[kp];
        tv_recon[ijk] = sqrtf(eps + dx * dx + dy * dy + dz * dz);
    }
}
////////////MAIN HOST FUNCTION ///////////////
// FGP-TV denoising of a 3D volume, in place.
//   vol       : host buffer, dimX*dimY*dimZ floats; overwritten with the result.
//   iter      : number of FGP iterations.
//   lambdaPar : TV regularization weight.
//   gpuIndex  : device to use, or -1 to keep the current device.
// Returns the TV measure of the *input* volume (computed before the loop).
// NOTE(review): on a device-selection failure this returns `false` (0.0f),
// which is indistinguishable from a legitimate zero TV value — confirm how
// callers interpret the return.
float cuda_tv_fgp_3D(float *vol, int iter, float lambdaPar, int dimX, int dimY, int dimZ, int gpuIndex)
{
// Set GPU Index
if (gpuIndex != -1) {
hipSetDevice(gpuIndex);
hipError_t err = hipGetLastError();
// Ignore errors caused by calling hipSetDevice multiple times
if (err != hipSuccess && err != hipErrorSetOnActiveProcess)
return false;
}
// Nonnegativity is always on and the isotropic projection is always used.
int nonneg = 1, methodTV = 0;
float multip, tv=1.0;
/*3D version*/
int ImSize = dimX*dimY*dimZ;
float *d_input, *d_update=NULL, *P1=NULL, *P2=NULL, *P3=NULL;
// Look into, originally BLK(X/Y/Z)SIZE
dim3 dimBlock(BLKXSIZE,BLKXSIZE,BLKXSIZE);
dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKXSIZE),idivup(dimZ,BLKXSIZE));
/*allocate space for images on device*/
hipMalloc((void**)&d_input,ImSize*sizeof(float));
hipMalloc((void**)&d_update,ImSize*sizeof(float));
hipMalloc((void**)&P1,ImSize*sizeof(float));
hipMalloc((void**)&P2,ImSize*sizeof(float));
hipMalloc((void**)&P3,ImSize*sizeof(float));
hipMemcpy(d_input,vol,ImSize*sizeof(float),hipMemcpyHostToDevice);
hipMemset(d_update, 0, ImSize*sizeof(float));
hipMemset(P1, 0, ImSize*sizeof(float));
hipMemset(P2, 0, ImSize*sizeof(float));
hipMemset(P3, 0, ImSize*sizeof(float));
// // Operators for Global Reductions
thrust::plus<float> binary_op;
// Measure TV (in this case d_update == tv_recon)
// d_update is reused as scratch here; the main loop overwrites it below.
hipLaunchKernelGGL(( tv_kernel_3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, dimX, dimY, dimZ);
hipDeviceSynchronize();
hipPeekAtLastError();
// Measure Norm of TV - Gradient
thrust::device_vector<float> tv_vec(d_update, d_update + ImSize);
tv = thrust::reduce(tv_vec.begin(), tv_vec.end(), 0.0f, binary_op);
/********************** Run CUDA 3D kernel here ********************/
// NOTE(review): step size uses 1/(26*lambda); presumably chosen for the
// 3D operator norm — confirm against the CCPI reference implementation.
multip = (1.0f/(26.0f*lambdaPar));
/* Main Loop */
for (int i = 0; i < iter; i++) {
/* computing the gradient of the objective function */
hipLaunchKernelGGL(( Obj_func3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, P1, P2, P3, dimX, dimY, dimZ, ImSize, lambdaPar);
hipDeviceSynchronize();
hipPeekAtLastError();
// Apply Nonnegativity
if (nonneg != 0) {
hipLaunchKernelGGL(( nonneg3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, dimX, dimY, dimZ, ImSize);
hipDeviceSynchronize();
hipPeekAtLastError(); }
/*Taking a step towards minus of the gradient*/
hipLaunchKernelGGL(( Grad_func3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, d_update, dimX, dimY, dimZ, ImSize, multip);
hipDeviceSynchronize();
hipPeekAtLastError();
/* projection step */
if (methodTV == 0)hipLaunchKernelGGL(( Proj_func3D_iso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, dimX, dimY, dimZ, ImSize); /* isotropic kernel */
elsehipLaunchKernelGGL(( Proj_func3D_aniso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, dimX, dimY, dimZ, ImSize); /* anisotropic kernel */
hipDeviceSynchronize();
hipPeekAtLastError();
}
/***************************************************************/
//copy result matrix from device to host memory
hipMemcpy(vol,d_update,ImSize*sizeof(float),hipMemcpyDeviceToHost);
hipFree(d_input);
hipFree(d_update);
hipFree(P1);
hipFree(P2);
hipFree(P3);
return tv;
}
| fe2681b867a593a86abc188f4f6715bbedd5978c.cu | /* CUDA implementation of FGP-TV [1] denoising/regularization model (2D/3D case)
*
* Input Parameters:
* 1. Noisy image/volume
* 2. lambdaPar - regularization parameter
* 3. Number of iterations
* 4. eplsilon: tolerance constant
* 5. TV-type: methodTV - 'iso' (0) or 'l1' (1)
* 6. nonneg: 'nonnegativity (0 is OFF by default)
*
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the Matlab's code and paper by
* [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems"
*/
//
// tv_fgp.cu
//
// Created by Hovden Group on 9/1/20.
// Adapted from CCPI-Regularization-Toolkit
//
#include "tv_fgp.h"
#include "shared.h"
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform_reduce.h>
#include <cmath>
#include <stdio.h>
#define BLKXSIZE 8
#define MAX(x,y) (x>y?x:y)
#define MIN(x,y) (x<y?x:y)
#define ABS(x) (x>0?x:-x)
#define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
// Primal update of FGP-TV: D = Ad - lambda * div(R), where the discrete
// divergence is built from backward differences of the dual fields R1..R3
// (zero contribution on the i/j/k == 0 boundaries).
__global__ void Obj_func3D_kernel(float *Ad, float *D, float *R1, float *R2, float *R3, int N, int M, int Z, int ImSize, float lambda)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= N || j >= M || k >= Z) return;
    int index = (Z * M) * i + Z * j + k;
    // Backward neighbors along each axis; strides are Z*M, Z, and 1.
    float back1 = (i > 0) ? R1[index - Z * M] : 0.0f;
    float back2 = (j > 0) ? R2[index - Z] : 0.0f;
    float back3 = (k > 0) ? R3[index - 1] : 0.0f;
    D[index] = Ad[index]
             - lambda * (R1[index] + R2[index] + R3[index] - back1 - back2 - back3);
}
// Dual ascent step of FGP-TV: P += multip * forward_diff(D), with zero
// forward differences on the far boundaries (i==N-1, j==M-1, k==Z-1).
__global__ void Grad_func3D_kernel(float *P1, float *P2, float *P3, float *D, int N, int M, int Z, int ImSize, float multip)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= N || j >= M || k >= Z) return;
    int index = (Z * M) * i + Z * j + k;
    float d = D[index];
    // Forward neighbors along each axis; strides are Z*M, Z, and 1.
    float fwd1 = (i < N - 1) ? d - D[index + Z * M] : 0.0f;
    float fwd2 = (j < M - 1) ? d - D[index + Z] : 0.0f;
    float fwd3 = (k < Z - 1) ? d - D[index + 1] : 0.0f;
    P1[index] += multip * fwd1;
    P2[index] += multip * fwd2;
    P3[index] += multip * fwd3;
}
/* Isotropic projection of the dual field (P1,P2,P3) onto the unit ball:
 * when |P|^2 > 1 the vector is rescaled to unit length, otherwise left alone.
 * Fix: use plain multiplications and sqrtf() instead of the original
 * double-precision pow(x,2)/sqrt(), which forced needless float->double
 * round trips in a float kernel. */
__global__ void Proj_func3D_iso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (Z*M)*i + Z*j + k;
    if ((i < N) && (j < M) && (k < Z)) {
        float p1 = P1[index];
        float p2 = P2[index];
        float p3 = P3[index];
        float denom = p1 * p1 + p2 * p2 + p3 * p3;  // squared magnitude
        if (denom > 1.0f) {
            float sq_denom = 1.0f / sqrtf(denom);
            P1[index] = p1 * sq_denom;
            P2[index] = p2 * sq_denom;
            P3[index] = p3 * sq_denom;
        }
    }
    return;
}
/* Anisotropic projection: each dual component is clamped independently to
 * the interval [-1, 1] by dividing by max(|P|, 1). */
__global__ void Proj_func3D_aniso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize)
{
    float val1, val2, val3;
    //calculate each thread global index
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (Z*M)*i + Z*j + k;
    if ((i < N) && (j < M) && (k < Z)) {
        // fabsf: explicit single-precision absolute value (the bare abs()
        // only works on floats via the C++ overload set; in C it truncates)
        val1 = fabsf(P1[index]);
        val2 = fabsf(P2[index]);
        val3 = fabsf(P3[index]);
        if (val1 < 1.0f) {val1 = 1.0f;}
        if (val2 < 1.0f) {val2 = 1.0f;}
        if (val3 < 1.0f) {val3 = 1.0f;}
        P1[index] = P1[index]/val1;
        P2[index] = P2[index]/val2;
        P3[index] = P3[index]/val3;
    }
    return;
}
/* Project the volume onto the non-negative orthant (clamp negatives to 0).
 * NOTE(review): a thread whose (ix,iy,iz) lies outside the N/M/Z box can
 * still map to idx < num_total and re-clamp another voxel; harmless because
 * clamping is idempotent, but confirm against the launch configuration. */
__global__ void nonneg3D_kernel(float* Output, int N, int M, int Z, int num_total)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    const int idx = (Z * M) * ix + Z * iy + iz;
    if (idx >= num_total) return;
    if (Output[idx] < 0.0f) Output[idx] = 0.0f;
}
/* Element-wise residual over the flattened volume: Output = Input1 - Input2. */
__global__ void FGPResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    const int idx = (Z * M) * ix + Z * iy + iz;
    if (idx >= num_total) return;
    Output[idx] = Input1[idx] - Input2[idx];
}
//Measure Reconstruction's TV.
/* Per-voxel smoothed TV integrand: sqrt(eps + |forward differences|^2).
 * Neighbours wrap periodically via the modulo in ip/jp/kp.
 * The result is summed on the host (thrust::reduce) to obtain the TV value. */
__global__ void tv_kernel_3D(float *vol, float *tv_recon, int nx, int ny, int nz)
{
    float eps = 1e-6;  // smoothing term keeping sqrt differentiable at 0
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int ijk = (ny*nz)*i + nz*j + k;
    int ip = (ny*nz)*((i+1)%nx) + nz*j + k;
    int jp = (ny*nz)*i + nz*((j+1)%ny) + k;
    int kp = (ny*nz)*i + nz*j + ((k+1)%nz);
    if ((i < nx) && (j < ny) && (k < nz)) {
        tv_recon[ijk] = sqrt(eps + ( vol[ijk] - vol[ip] ) * ( vol[ijk] - vol[ip] )
                        + ( vol[ijk] - vol[jp] ) * ( vol[ijk] - vol[jp] )
                        + ( vol[ijk] - vol[kp] ) * ( vol[ijk] - vol[kp] ));
    }
}
////////////MAIN HOST FUNCTION ///////////////
/* FGP-TV denoising driver (3D).
 *   vol       : in/out host buffer of dimX*dimY*dimZ floats; overwritten
 *               with the denoised result.
 *   iter      : number of FGP iterations.
 *   lambdaPar : regularisation weight.
 *   gpuIndex  : CUDA device to select, or -1 to keep the current device.
 * Returns the TV measure of the INPUT volume (computed before the loop),
 * or 0.0f when device selection fails.
 * NOTE(review): cudaMalloc/cudaMemcpy/cudaMemset return codes are not
 * checked here — consider wrapping them like the kernel launches. */
float cuda_tv_fgp_3D(float *vol, int iter, float lambdaPar, int dimX, int dimY, int dimZ, int gpuIndex)
{
    // Set GPU Index
    if (gpuIndex != -1) {
        cudaSetDevice(gpuIndex);
        cudaError_t err = cudaGetLastError();
        // Ignore errors caused by calling cudaSetDevice multiple times
        if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess)
            return 0.0f;  // was `return false;` — implicit bool->float conversion
    }
    int nonneg = 1, methodTV = 0;
    float multip, tv=1.0;
    /*3D verson*/
    int ImSize = dimX*dimY*dimZ;
    float *d_input, *d_update=NULL, *P1=NULL, *P2=NULL, *P3=NULL;
    // Look into, originally BLK(X/Y/Z)SIZE
    dim3 dimBlock(BLKXSIZE,BLKXSIZE,BLKXSIZE);
    dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKXSIZE),idivup(dimZ,BLKXSIZE));
    /*allocate space for images on device*/
    cudaMalloc((void**)&d_input,ImSize*sizeof(float));
    cudaMalloc((void**)&d_update,ImSize*sizeof(float));
    cudaMalloc((void**)&P1,ImSize*sizeof(float));
    cudaMalloc((void**)&P2,ImSize*sizeof(float));
    cudaMalloc((void**)&P3,ImSize*sizeof(float));
    cudaMemcpy(d_input,vol,ImSize*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemset(d_update, 0, ImSize*sizeof(float));
    cudaMemset(P1, 0, ImSize*sizeof(float));
    cudaMemset(P2, 0, ImSize*sizeof(float));
    cudaMemset(P3, 0, ImSize*sizeof(float));
    // // Operators for Global Reductions
    thrust::plus<float> binary_op;
    // Measure TV (in this case d_update == tv_recon, reused as scratch)
    tv_kernel_3D<<<dimGrid,dimBlock>>>(d_input, d_update, dimX, dimY, dimZ);
    cudaDeviceSynchronize();
    cudaPeekAtLastError();
    // Measure Norm of TV - Gradient
    thrust::device_vector<float> tv_vec(d_update, d_update + ImSize);
    tv = thrust::reduce(tv_vec.begin(), tv_vec.end(), 0.0f, binary_op);
    /********************** Run CUDA 3D kernel here ********************/
    multip = (1.0f/(26.0f*lambdaPar));  // FGP step size for the 3D operator
    /* Main Loop */
    for (int i = 0; i < iter; i++) {
        /* computing the gradient of the objective function */
        Obj_func3D_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, P1, P2, P3, dimX, dimY, dimZ, ImSize, lambdaPar);
        cudaDeviceSynchronize();
        cudaPeekAtLastError();
        // Apply Nonnegativity
        if (nonneg != 0) {
            nonneg3D_kernel<<<dimGrid,dimBlock>>>(d_update, dimX, dimY, dimZ, ImSize);
            cudaDeviceSynchronize();
            cudaPeekAtLastError(); }
        /*Taking a step towards minus of the gradient*/
        Grad_func3D_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, d_update, dimX, dimY, dimZ, ImSize, multip);
        cudaDeviceSynchronize();
        cudaPeekAtLastError();
        /* projection step */
        if (methodTV == 0) Proj_func3D_iso_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, dimX, dimY, dimZ, ImSize); /* isotropic kernel */
        else Proj_func3D_aniso_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, dimX, dimY, dimZ, ImSize); /* anisotropic kernel */
        cudaDeviceSynchronize();
        cudaPeekAtLastError();
    }
    /***************************************************************/
    //copy result matrix from device to host memory
    cudaMemcpy(vol,d_update,ImSize*sizeof(float),cudaMemcpyDeviceToHost);
    cudaFree(d_input);
    cudaFree(d_update);
    cudaFree(P1);
    cudaFree(P2);
    cudaFree(P3);
    return tv;
}
|
f4bc77313df81141f2e38669b9ee1eacc2c3605d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* jacobi.c - Poisson problem in 3d
*
*/
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
__host__ __device__
/* Run iter_max Jacobi sweeps for the 3D Poisson problem on an N^3 grid:
 *   u[i][j][k] = 1/6 * (sum of the six neighbours of v + (1/N^2) * f)
 * v is scratch storage holding a copy of the previous iterate; boundary
 * planes of u are left untouched. The commented-out stopTest code was a
 * convergence check that has been disabled in favour of a fixed count.
 * NOTE(review): 1/N^2 is used as h^2 — confirm whether the grid spacing
 * should be 1/(N-1) for this discretisation. */
void jacobi(int N, double ***u, double ***v, double ***f, int iter_max) {
    //double stopTest = 100000;
    int counter = 0;
    int i, j, k;
    //while(stopTest>tolerance && counter < iter_max){
    // stopTest =0.0;
    do {
#pragma omp parallel default(none) shared(u, v, f, N) private(i, j, k)
        {
            // copy the current iterate into the scratch array v
#pragma omp for
            for (i = 0; i < N; i++) {
                for (j = 0; j < N; j++) {
                    for (k = 0; k < N; k++) {
                        v[i][j][k] = u[i][j][k];
                    }
                }
            }
            // FIX: restore the worksharing directive. Without it every thread
            // of the parallel region redundantly executed the whole update
            // loop (it was dropped together with the commented-out
            // reduction-based stopping test). The implicit barrier of the
            // first `omp for` guarantees v is fully copied before the update.
            // #pragma omp for reduction(+: stopTest)
#pragma omp for
            for (i = 1; i < N - 1; i++) {
                for (j = 1; j < N - 1; j++) {
                    for (k = 1; k < N - 1; k++) {
                        u[i][j][k] = 1. / 6. *
                            (v[i - 1][j][k] + v[i + 1][j][k] + v[i][j - 1][k] +
                             v[i][j + 1][k] + v[i][j][k - 1] + v[i][j][k + 1] +
                             1. / ((N) * (N)) * f[i][j][k]);  // formula and matrix
                        // stopTest +=(u[i][j][k]-v[i][j][k])*(u[i][j][k]-v[i][j][k]);
                    }
                }
            }
        }  // End Parallel
        counter++;
        //}
    } while (counter < iter_max);
}
// Kernel to be launched on a single thread
/* Device entry point: runs the entire Jacobi iteration serially on the GPU
 * (intended for a single-thread launch, per the comment above). */
__global__
void jacobi_serial(int N, double ***u, double ***v, double ***f, int iter_max)
{
    jacobi(N, u, v, f, iter_max);
}
| f4bc77313df81141f2e38669b9ee1eacc2c3605d.cu | /* jacobi.c - Poisson problem in 3d
*
*/
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
__host__ __device__
/* Run iter_max Jacobi sweeps for the 3D Poisson problem on an N^3 grid:
 *   u[i][j][k] = 1/6 * (sum of the six neighbours of v + (1/N^2) * f)
 * v is scratch storage holding a copy of the previous iterate; boundary
 * planes of u are left untouched. The commented-out stopTest code was a
 * convergence check that has been disabled in favour of a fixed count.
 * NOTE(review): 1/N^2 is used as h^2 — confirm whether the grid spacing
 * should be 1/(N-1) for this discretisation. */
void jacobi(int N, double ***u, double ***v, double ***f, int iter_max) {
    //double stopTest = 100000;
    int counter = 0;
    int i, j, k;
    //while(stopTest>tolerance && counter < iter_max){
    // stopTest =0.0;
    do {
#pragma omp parallel default(none) shared(u, v, f, N) private(i, j, k)
        {
            // copy the current iterate into the scratch array v
#pragma omp for
            for (i = 0; i < N; i++) {
                for (j = 0; j < N; j++) {
                    for (k = 0; k < N; k++) {
                        v[i][j][k] = u[i][j][k];
                    }
                }
            }
            // FIX: restore the worksharing directive. Without it every thread
            // of the parallel region redundantly executed the whole update
            // loop (it was dropped together with the commented-out
            // reduction-based stopping test). The implicit barrier of the
            // first `omp for` guarantees v is fully copied before the update.
            // #pragma omp for reduction(+: stopTest)
#pragma omp for
            for (i = 1; i < N - 1; i++) {
                for (j = 1; j < N - 1; j++) {
                    for (k = 1; k < N - 1; k++) {
                        u[i][j][k] = 1. / 6. *
                            (v[i - 1][j][k] + v[i + 1][j][k] + v[i][j - 1][k] +
                             v[i][j + 1][k] + v[i][j][k - 1] + v[i][j][k + 1] +
                             1. / ((N) * (N)) * f[i][j][k]);  // formula and matrix
                        // stopTest +=(u[i][j][k]-v[i][j][k])*(u[i][j][k]-v[i][j][k]);
                    }
                }
            }
        }  // End Parallel
        counter++;
        //}
    } while (counter < iter_max);
}
// Kernel to be launched on a single thread
/* Device entry point: runs the entire Jacobi iteration serially on the GPU
 * (intended for a single-thread launch, per the comment above). */
__global__
void jacobi_serial(int N, double ***u, double ***v, double ***f, int iter_max)
{
    jacobi(N, u, v, f, iter_max);
}
|
a2cc87100b4dc2e0ff4e8e7b858bebac5167e502.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "find_min_max.h"
#include <stk/cuda/cuda.h>
#include <stk/cuda/ptr.h>
#include <stk/image/gpu_volume.h>
#include <algorithm>
#include <cfloat>
namespace cuda = stk::cuda;
// Implementation 1
// Interleaved addressing w/o any optimization
/* Stage-1 reduction: each block computes the {min, max} (packed into a
 * float2 as {.x=min, .y=max}) over its tile of the volume and writes one
 * float2 to out[blockIdx]. Out-of-volume threads contribute the identity
 * {FLT_MAX, -FLT_MAX}. Requires dynamic shared memory of
 * blockDim.x*blockDim.y*blockDim.z float2 elements.
 * NOTE(review): `tid % (2*s)` interleaved addressing is the slow classic
 * variant — fine here, the comment above says optimization is deliberate. */
__global__ void algo1_reduce_volume_min_max(
    const cuda::VolumePtr<float> in,
    dim3 dims,
    float2* out)
{
    extern __shared__ float2 shared[];
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int z = blockIdx.z*blockDim.z + threadIdx.z;
    int tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y;
    int bid = blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y;
    // identity element so inactive threads do not affect the result
    shared[tid] = {FLT_MAX, -FLT_MAX};
    if (x < dims.x &&
        y < dims.y &&
        z < dims.z) {
        shared[tid].x = in(x,y,z);
        shared[tid].y = in(x,y,z);
    }
    __syncthreads();
    // interleaved-addressing tree reduction over the block's shared tile
    for (unsigned int s=1; s < blockDim.x*blockDim.y*blockDim.z; s *= 2)
    {
        if ((tid % (2*s)) == 0)
        {
            shared[tid].x = min(shared[tid].x, shared[tid + s].x);
            shared[tid].y = max(shared[tid].y, shared[tid + s].y);
        }
        __syncthreads();
    }
    if (tid == 0) out[bid] = shared[0];
}
/* Stage-2 reduction: folds n float2{min,max} pairs from `in` down to one
 * pair per block in `out`. Called repeatedly by the host until one value
 * remains. Requires blockDim.x float2 elements of dynamic shared memory. */
__global__ void algo1_reduce_min_max(
    unsigned int n,
    float2* in,
    float2* out)
{
    extern __shared__ float2 shared[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
    // pad the tail of the last block with the identity element
    if (i < n) shared[tid] = in[i];
    else shared[tid] = {FLT_MAX, -FLT_MAX};
    __syncthreads();
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        if ((tid % (2*s)) == 0)
        {
            shared[tid].x = min(shared[tid].x, shared[tid + s].x);
            shared[tid].y = max(shared[tid].y, shared[tid + s].y);
        }
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = shared[0];
}
/* Two-stage min/max reduction over a GPU volume.
 * Stage 1 produces one float2{min,max} per 8x8x8 block; stage 2 repeatedly
 * reduces the per-block results until a single pair remains, which is
 * copied back into `min` and `max`. */
void find_min_max_1(stk::GpuVolume& vol, float& min, float& max)
{
    dim3 block_size{8,8,8};
    dim3 grid_size {
        (vol.size().x + block_size.x - 1) / block_size.x,
        (vol.size().y + block_size.y - 1) / block_size.y,
        (vol.size().z + block_size.z - 1) / block_size.z
    };
    // Number of blocks (or values in the active buffer)
    uint32_t n = grid_size.x * grid_size.y * grid_size.z;
    // Allocate our global buffers
    float2* d_out;
    CUDA_CHECK_ERRORS(hipMalloc(&d_out, 2*n*sizeof(float)));
    float2* d_in;
    CUDA_CHECK_ERRORS(hipMalloc(&d_in, 2*n*sizeof(float)));
    // 512 == 8*8*8 threads per block; one float2 of shared memory each
    hipLaunchKernelGGL(( algo1_reduce_volume_min_max), dim3(grid_size), dim3(block_size),
        uint32_t(2*sizeof(float)*512), 0,
        vol, vol.size(), d_out
    );
    CUDA_CHECK_ERRORS(hipPeekAtLastError());
    CUDA_CHECK_ERRORS(hipDeviceSynchronize());
    while (n > 1) {
        // block_count should always be pow2 as it follows the gridsize from
        // previous step
        uint32_t n_threads = std::min<uint32_t>(n, 1024);
        uint32_t n_blocks = (n + n_threads - 1) / n_threads;
        CUDA_CHECK_ERRORS(hipMemcpy(d_in, d_out, 2*n*sizeof(float),
                                    hipMemcpyDeviceToDevice));
        // FIX: the hipified launch was malformed (`dim3({n_blocks),dim3(1),1}`
        // does not compile) and dropped the stream argument required by
        // hipLaunchKernelGGL(kernel, grid, block, sharedMem, stream, ...).
        hipLaunchKernelGGL(( algo1_reduce_min_max), dim3(n_blocks), dim3(n_threads),
            uint32_t(2*sizeof(float)*n_threads), 0,
            n, d_in, d_out);
        CUDA_CHECK_ERRORS(hipPeekAtLastError());
        CUDA_CHECK_ERRORS(hipDeviceSynchronize());
        n = n_blocks;
    }
    float2 min_max;
    CUDA_CHECK_ERRORS(hipMemcpy(&min_max, d_out, 2*sizeof(float), hipMemcpyDeviceToHost));
    min = min_max.x;
    max = min_max.y;
    CUDA_CHECK_ERRORS(hipFree(d_in));
    CUDA_CHECK_ERRORS(hipFree(d_out));
}
| a2cc87100b4dc2e0ff4e8e7b858bebac5167e502.cu | #include "find_min_max.h"
#include <stk/cuda/cuda.h>
#include <stk/cuda/ptr.h>
#include <stk/image/gpu_volume.h>
#include <algorithm>
#include <cfloat>
namespace cuda = stk::cuda;
// Implementation 1
// Interleaved addressing w/o any optimization
/* Stage-1 reduction: each block computes the {min, max} (packed into a
 * float2 as {.x=min, .y=max}) over its tile of the volume and writes one
 * float2 to out[blockIdx]. Out-of-volume threads contribute the identity
 * {FLT_MAX, -FLT_MAX}. Requires dynamic shared memory of
 * blockDim.x*blockDim.y*blockDim.z float2 elements. */
__global__ void algo1_reduce_volume_min_max(
    const cuda::VolumePtr<float> in,
    dim3 dims,
    float2* out)
{
    extern __shared__ float2 shared[];
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int z = blockIdx.z*blockDim.z + threadIdx.z;
    int tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y;
    int bid = blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y;
    // identity element so inactive threads do not affect the result
    shared[tid] = {FLT_MAX, -FLT_MAX};
    if (x < dims.x &&
        y < dims.y &&
        z < dims.z) {
        shared[tid].x = in(x,y,z);
        shared[tid].y = in(x,y,z);
    }
    __syncthreads();
    // interleaved-addressing tree reduction over the block's shared tile
    for (unsigned int s=1; s < blockDim.x*blockDim.y*blockDim.z; s *= 2)
    {
        if ((tid % (2*s)) == 0)
        {
            shared[tid].x = min(shared[tid].x, shared[tid + s].x);
            shared[tid].y = max(shared[tid].y, shared[tid + s].y);
        }
        __syncthreads();
    }
    if (tid == 0) out[bid] = shared[0];
}
/* Stage-2 reduction: folds n float2{min,max} pairs from `in` down to one
 * pair per block in `out`. Called repeatedly by the host until one value
 * remains. Requires blockDim.x float2 elements of dynamic shared memory. */
__global__ void algo1_reduce_min_max(
    unsigned int n,
    float2* in,
    float2* out)
{
    extern __shared__ float2 shared[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
    // pad the tail of the last block with the identity element
    if (i < n) shared[tid] = in[i];
    else shared[tid] = {FLT_MAX, -FLT_MAX};
    __syncthreads();
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        if ((tid % (2*s)) == 0)
        {
            shared[tid].x = min(shared[tid].x, shared[tid + s].x);
            shared[tid].y = max(shared[tid].y, shared[tid + s].y);
        }
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = shared[0];
}
/* Two-stage min/max reduction over a GPU volume.
 * Stage 1 produces one float2{min,max} per 8x8x8 block; stage 2 repeatedly
 * reduces the per-block results until a single pair remains, which is
 * copied back into `min` and `max`. */
void find_min_max_1(stk::GpuVolume& vol, float& min, float& max)
{
    dim3 block_size{8,8,8};
    dim3 grid_size {
        (vol.size().x + block_size.x - 1) / block_size.x,
        (vol.size().y + block_size.y - 1) / block_size.y,
        (vol.size().z + block_size.z - 1) / block_size.z
    };
    // Number of blocks (or values in the active buffer)
    uint32_t n = grid_size.x * grid_size.y * grid_size.z;
    // Allocate our global buffers
    float2* d_out;
    CUDA_CHECK_ERRORS(cudaMalloc(&d_out, 2*n*sizeof(float)));
    float2* d_in;
    CUDA_CHECK_ERRORS(cudaMalloc(&d_in, 2*n*sizeof(float)));
    // 512 == 8*8*8 threads per block; one float2 of shared memory each
    algo1_reduce_volume_min_max<<<grid_size, block_size,
        uint32_t(2*sizeof(float)*512)>>>(
        vol, vol.size(), d_out
    );
    CUDA_CHECK_ERRORS(cudaPeekAtLastError());
    CUDA_CHECK_ERRORS(cudaDeviceSynchronize());
    while (n > 1) {
        // block_count should always be pow2 as it follows the gridsize from
        // previous step
        uint32_t n_threads = std::min<uint32_t>(n, 1024);
        uint32_t n_blocks = (n + n_threads - 1) / n_threads;
        CUDA_CHECK_ERRORS(cudaMemcpy(d_in, d_out, 2*n*sizeof(float),
                                     cudaMemcpyDeviceToDevice));
        // scalar launch config (implicitly converted to dim3); the previous
        // braced-init-list form `<<<{n_blocks,1,1}, ...>>>` is non-portable
        algo1_reduce_min_max<<<n_blocks, n_threads,
            uint32_t(2*sizeof(float)*n_threads)>>>(
            n, d_in, d_out);
        CUDA_CHECK_ERRORS(cudaPeekAtLastError());
        CUDA_CHECK_ERRORS(cudaDeviceSynchronize());
        n = n_blocks;
    }
    float2 min_max;
    CUDA_CHECK_ERRORS(cudaMemcpy(&min_max, d_out, 2*sizeof(float), cudaMemcpyDeviceToHost));
    min = min_max.x;
    max = min_max.y;
    CUDA_CHECK_ERRORS(cudaFree(d_in));
    CUDA_CHECK_ERRORS(cudaFree(d_out));
}
|
11ee786f6f55dd4d188b242c359a51d54e788cd3.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_complex.h>
#include <rocblas.h>
#include <stdio.h>
#include <algorithm>
#include <functional>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "update_ops_cuda.h"
#include "util.cuh"
#include "util_func.h"
#include "util_type.h"
#include "util_type_internal.h"
// maximum # of GTYPE elements allocating on constant memory: 4096
// Constant-memory staging areas shared by the gate kernels in this file;
// hosts upload into them with hipMemcpyToSymbolAsync before each launch.
__constant__ GTYPE matrix_const_gpu[1024];            // gate matrix, up to 32x32 (5 qubits)
__constant__ ITYPE matrix_mask_list_gpu[1024];        // basis-mask scratch for multi-qubit gates
__constant__ UINT sorted_insert_index_list_gpu[15];   // target indices sorted ascending (for zero-bit insertion)
__constant__ UINT target_index_list_gpu[15];          // target indices in caller order (for bit offsets)
/** vqcsim
* perform multi_qubit_Pauli_gate with XZ mask.
*
* This function assumes bit_flip_mask is not 0, i.e., at least one bit is
* flipped. If no bit is flipped, use multi_qubit_Pauli_gate_Z_mask. This
* function update the quantum state with Pauli operation. bit_flip_mask,
* phase_flip_mask, global_phase_90rot_count, and pivot_qubit_index must be
* computed before calling this function. See get_masks_from_*_list for the
 * above four arguments.
*/
// void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE
// phase_flip_mask, UINT global_phase_90rot_count,UINT pivot_qubit_index,
// CPPCTYPE* state, ITYPE dim); void
// multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE
// phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index,
// double angle, CPPCTYPE* state, ITYPE dim); void
// multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CPPCTYPE* state, ITYPE
// dim); void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask,
// double angle, CPPCTYPE* state, ITYPE dim);
/* CAS-loop emulation of atomicAdd on double (presumably for devices without
 * native double atomicAdd — matches the standard compare-and-swap recipe).
 * Returns the value stored at *address before the addition. */
__device__ double atomicAdd_double_duplicate(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // retry until no other thread modified *address between read and CAS
        old = atomicCAS(address_as_ull, assumed,
            __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}
/* Apply the 32x32 matrix staged in matrix_const_gpu to five target qubits.
 * Threads sharing threadIdx.x cooperate on one group of 32 amplitudes;
 * threadIdx.y in [0,32) selects the matrix row. Uses
 * sorted_insert_index_list_gpu for zero-bit insertion and
 * target_index_list_gpu for the bit offsets (both uploaded by the host).
 * NOTE(review): __syncthreads() sits inside the `basis < loop_dim` branch;
 * the host launch config makes every thread satisfy it, but confirm before
 * reusing this kernel with a different grid. */
__global__ void penta_qubit_dense_matrix_gate_gpu(GTYPE* state_gpu, ITYPE dim) {
    __shared__ GTYPE state_basis[1024];
    GTYPE tmp = make_cuDoubleComplex(0.0, 0.0);
    ITYPE loop_dim = dim >> 5;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    int y;
    if (basis < loop_dim) {
        // insert zero bits at the (ascending) target positions
        for (y = 0; y < 5; ++y)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[y]);
        // bit y of threadIdx.y selects target y of the 32-state group
        for (y = 0; y < 5; ++y)
            basis +=
                (1ULL << target_index_list_gpu[y]) * ((threadIdx.y >> y) & 1);
        state_basis[(threadIdx.x << 5) + threadIdx.y] = state_gpu[basis];
        __syncthreads();
        // row (threadIdx.y) of the 32x32 matrix times the gathered amplitudes
        for (y = 0; y < 32; ++y)
            tmp = cuCadd(tmp, cuCmul(matrix_const_gpu[(threadIdx.y << 5) + y],
                             state_basis[(threadIdx.x << 5) + y]));
        state_gpu[basis] = tmp;
    }
}
/* Host driver for the 5-qubit dense gate: selects the device, stages the
 * 32x32 matrix and both target-index lists in constant memory, then launches
 * penta_qubit_dense_matrix_gate_gpu on the caller's stream and synchronizes. */
__host__ void penta_qubit_dense_matrix_gate_host(
    const unsigned int target_qubit_index[5], const CPPCTYPE matrix[1024],
    void* state, ITYPE dim, void* stream, UINT device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    hipError_t cudaStatus;
    checkCudaErrors(
        hipMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE) * 1024,
            0, hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    ITYPE loop_dim = dim >> 5;
    // block.y enumerates the 32 basis states of one group; block.x groups
    dim3 block;
    block.y = 32;
    block.x = loop_dim <= 32 ? loop_dim : 32;
    unsigned int grid = loop_dim / block.x;
    // caller-order targets: used by the kernel for the bit offsets
    checkCudaErrors(
        hipMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index,
            sizeof(UINT) * 5, 0, hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    // ascending targets: required by the zero-bit insertion step
    unsigned int sort_list[5];
    memcpy(sort_list, target_qubit_index, sizeof(unsigned int) * 5);
    std::sort(sort_list, sort_list + 5);
    checkCudaErrors(
        hipMemcpyToSymbolAsync(sorted_insert_index_list_gpu, sort_list,
            sizeof(UINT) * 5, 0, hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    hipLaunchKernelGGL(( penta_qubit_dense_matrix_gate_gpu), dim3(grid), dim3(block), 0, *cuda_stream,
        state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    cudaStatus = hipGetLastError();
    checkCudaErrors(cudaStatus, __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
/* Shared-memory variant of the 4-qubit dense gate (currently unused; see the
 * commented-out path in quad_qubit_dense_matrix_gate_host). Threads sharing
 * threadIdx.x cooperate on one group of 16 amplitudes; threadIdx.y in [0,16)
 * selects the matrix row — mirrors penta_qubit_dense_matrix_gate_gpu.
 * NOTE(review): both the insertion and the bit offsets use
 * sorted_insert_index_list_gpu; the penta kernel uses target_index_list_gpu
 * for the offsets — confirm the intended upload order before enabling. */
__global__ void quad_qubit_dense_matrix_gate_shared_gpu(
    GTYPE* state_gpu, ITYPE dim) {
    __shared__ GTYPE state_basis[1024];
    GTYPE tmp = make_cuDoubleComplex(0.0, 0.0);
    ITYPE loop_dim = dim >> 4;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    int y;
    if (basis < loop_dim) {
        for (y = 0; y < 4; ++y)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[y]);
        for (y = 0; y < 4; ++y)
            basis += (1ULL << sorted_insert_index_list_gpu[y]) *
                     ((threadIdx.y >> y) & 1);
        // FIX: was state_basis[(threadIdx.x << 4) + y] — y equals 4 after the
        // loop above, so only one slot of each 16-entry group was written.
        state_basis[(threadIdx.x << 4) + threadIdx.y] = state_gpu[basis];
        __syncthreads();
        // FIX: the shared-memory column must follow the loop variable y,
        // not threadIdx.y (cf. penta_qubit_dense_matrix_gate_gpu).
        for (y = 0; y < 16; ++y)
            tmp = cuCadd(tmp, cuCmul(matrix_const_gpu[(threadIdx.y << 4) + y],
                             state_basis[(threadIdx.x << 4) + y]));
        state_gpu[basis] = tmp;
    }
}
/* Apply the 16x16 matrix staged in matrix_const_gpu to four target qubits.
 * Each thread owns one group of 16 basis states; sorted_index0..3 are the
 * same target indices in ascending order (needed for zero-bit insertion). */
__global__ void quad_qubit_dense_matrix_gate_gpu(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    unsigned int target2_qubit_index, unsigned int target3_qubit_index,
    unsigned int sorted_index0, unsigned int sorted_index1,
    unsigned int sorted_index2, unsigned int sorted_index3, GTYPE* state_gpu,
    ITYPE dim) {
    const ITYPE loop_dim = dim >> 4;
    ITYPE basis0 = blockIdx.x * blockDim.x + threadIdx.x;
    if (basis0 >= loop_dim) return;
    // build the base index: insert zero bits at the sorted target positions
    basis0 = insert_zero_to_basis_index_device(basis0, sorted_index0);
    basis0 = insert_zero_to_basis_index_device(basis0, sorted_index1);
    basis0 = insert_zero_to_basis_index_device(basis0, sorted_index2);
    basis0 = insert_zero_to_basis_index_device(basis0, sorted_index3);
    // enumerate the 16 basis states: bit t of the local index selects target t
    const unsigned int targets[4] = {target0_qubit_index, target1_qubit_index,
        target2_qubit_index, target3_qubit_index};
    ITYPE basis[16];
    for (int x = 0; x < 16; ++x) {
        ITYPE b = basis0;
        for (int t = 0; t < 4; ++t)
            if ((x >> t) & 1) b += (1ULL << targets[t]);
        basis[x] = b;
    }
    // dense 16x16 matrix-vector product on the gathered amplitudes
    GTYPE acc[16];
    for (int y = 0; y < 16; ++y) {
        acc[y] = make_cuDoubleComplex(0.0, 0.0);
        for (int x = 0; x < 16; ++x)
            acc[y] = cuCadd(acc[y],
                cuCmul(matrix_const_gpu[y * 16 + x], state_gpu[basis[x]]));
    }
    for (int y = 0; y < 16; ++y) state_gpu[basis[y]] = acc[y];
}
/* Host driver for the 4-qubit dense gate: stages the 16x16 matrix in
 * constant memory, sorts the target indices (needed for zero-bit insertion
 * in the kernel) and launches quad_qubit_dense_matrix_gate_gpu on the
 * caller's stream. The shared-memory variant below is kept commented out. */
__host__ void quad_qubit_dense_matrix_gate_host(
    const unsigned int target_qubit_index[4], const CPPCTYPE matrix[256],
    void* state, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    hipError_t cudaStatus;
    checkCudaErrors(
        hipMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE) * 256,
            0, hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    ITYPE loop_dim = dim >> 4;
    unsigned int block = loop_dim <= 512 ? loop_dim : 512;
    unsigned int grid = loop_dim / block;
    unsigned int target0_qubit_index, target1_qubit_index, target2_qubit_index,
        target3_qubit_index;
    target0_qubit_index = target_qubit_index[0];
    target1_qubit_index = target_qubit_index[1];
    target2_qubit_index = target_qubit_index[2];
    target3_qubit_index = target_qubit_index[3];
    unsigned int sort_list[4];
    memcpy(sort_list, target_qubit_index, sizeof(unsigned int) * 4);
    std::sort(sort_list, sort_list + 4);
    hipLaunchKernelGGL(( quad_qubit_dense_matrix_gate_gpu), dim3(grid), dim3(block), 0, *cuda_stream,
        target0_qubit_index, target1_qubit_index, target2_qubit_index,
        target3_qubit_index, sort_list[0], sort_list[1], sort_list[2],
        sort_list[3], state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    cudaStatus = hipGetLastError();
    checkCudaErrors(cudaStatus, __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
    /*
    dim3 block;
    block.y = 16;
    block.x = loop_dim <= 64 ? loop_dim : 64;
    unsigned int grid = loop_dim / block.x;
    checkCudaErrors(hipMemcpyToSymbol(sorted_insert_index_list_gpu,
    target_qubit_index, sizeof(UINT)*4), __FILE__, __LINE__);
    quad_qubit_dense_matrix_gate_shared_gpu << <grid, block >> >(state_gpu,
    dim);
    checkCudaErrors(hipDeviceSynchronize(), __FILE__, __LINE__);
    cudaStatus = hipGetLastError();
    checkCudaErrors(cudaStatus, __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
    */
}
// target qubit 0 < target qubit 1 < target qubit 2
/* Apply the 8x8 matrix staged in matrix_const_gpu to three target qubits.
 * (small, mid, large) are the same indices sorted ascending, required by
 * the zero-bit insertion that forms the base index. */
__global__ void triple_qubit_dense_matrix_gate_gpu(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    unsigned int target2_qubit_index, unsigned int small, unsigned int mid,
    unsigned int large, GTYPE* state_gpu, ITYPE dim) {
    const ITYPE loop_dim = dim >> 3;
    ITYPE basis0 = blockIdx.x * blockDim.x + threadIdx.x;
    if (basis0 >= loop_dim) return;
    // build the base index: insert zero bits at the sorted target positions
    basis0 = insert_zero_to_basis_index_device(basis0, small);
    basis0 = insert_zero_to_basis_index_device(basis0, mid);
    basis0 = insert_zero_to_basis_index_device(basis0, large);
    // enumerate the 8 basis states: bit t of the local index selects target t
    const unsigned int targets[3] = {
        target0_qubit_index, target1_qubit_index, target2_qubit_index};
    ITYPE basis[8];
    for (int x = 0; x < 8; ++x) {
        ITYPE b = basis0;
        for (int t = 0; t < 3; ++t)
            if ((x >> t) & 1) b += (1ULL << targets[t]);
        basis[x] = b;
    }
    // dense 8x8 matrix-vector product on the gathered amplitudes
    GTYPE acc[8];
    for (int y = 0; y < 8; ++y) {
        acc[y] = make_cuDoubleComplex(0.0, 0.0);
        for (int x = 0; x < 8; ++x)
            acc[y] = cuCadd(acc[y],
                cuCmul(matrix_const_gpu[y * 8 + x], state_gpu[basis[x]]));
    }
    for (int y = 0; y < 8; ++y) state_gpu[basis[y]] = acc[y];
}
/* Host driver for the 3-qubit dense gate: stages the 8x8 matrix in constant
 * memory, sorts the three target indices (ascending order is required by the
 * kernel's zero-bit insertion) and launches the kernel on the caller's
 * stream. */
__host__ void triple_qubit_dense_matrix_gate_host(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    unsigned int target2_qubit_index, const CPPCTYPE matrix[64], void* state,
    ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    hipError_t cudaStatus;
    checkCudaErrors(
        hipMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE) * 64, 0,
            hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    // (not using shared memory)
    ITYPE loop_dim = dim >> 3;
    unsigned int block = loop_dim <= 512 ? loop_dim : 512;
    unsigned int grid = loop_dim / block;
    // std::sort instead of the hand-rolled three-swap network; consistent
    // with quad_qubit_dense_matrix_gate_host / penta_..._host
    unsigned int sort_list[3] = {
        target0_qubit_index, target1_qubit_index, target2_qubit_index};
    std::sort(sort_list, sort_list + 3);
    hipLaunchKernelGGL(( triple_qubit_dense_matrix_gate_gpu), dim3(grid), dim3(block), 0, *cuda_stream,
        target0_qubit_index, target1_qubit_index, target2_qubit_index,
        sort_list[0], sort_list[1], sort_list[2], state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    cudaStatus = hipGetLastError();
    checkCudaErrors(cudaStatus, __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// target1 qubit index > target0 qubit index
/* Apply the 4x4 matrix staged in matrix_const_gpu to two target qubits.
 * (small, large) are the two target indices sorted ascending; the base index
 * is built by splitting the thread index j into head/body/tail around the
 * two target bit positions and re-assembling with a zero at each target. */
__global__ void double_qubit_dense_matrix_gate_gpu(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    unsigned int small, unsigned int large, GTYPE* state_gpu, ITYPE dim) {
    // unsigned int left, right;
    ITYPE head, body, tail, basis0;
    ITYPE basis[4];
    GTYPE d_buffer[4];
    ITYPE quad_dim = dim >> 2;
    ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
    int x, y;
    /*
    if (target1_qubit_index > target2_qubit_index){
        left = target1_qubit_index;
        right = target2_qubit_index;
    }
    else {
        left = target2_qubit_index;
        right = target1_qubit_index;
    }
    */
    // target1 qubit index > target2 qubit index
    if (j < quad_dim) {
        // split j at the two (sorted) target bit positions
        head = j >> (large - 1);
        body =
            (j & ((1ULL << (large - 1)) - 1)) >> small;  // (j % 2^(k-1)) >> i
        tail = j & ((1ULL << small) - 1);                // j%(2^i)
        // re-assemble leaving a zero bit at both target positions
        basis0 = (head << (large + 1)) + (body << (small + 1)) + tail;
        // the four basis states: bit0 -> target0, bit1 -> target1
        basis[0] = basis0;
        basis[1] = basis0 + (1ULL << target0_qubit_index);
        basis[2] = basis0 + (1ULL << target1_qubit_index);
        basis[3] = basis0 + (1ULL << target1_qubit_index) +
                   (1ULL << target0_qubit_index);
        // dense 4x4 matrix-vector product
        for (y = 0; y < 4; ++y) {
            d_buffer[y] = make_cuDoubleComplex(0.0, 0.0);
            for (x = 0; x < 4; ++x) {
                d_buffer[y] = cuCadd(d_buffer[y],
                    cuCmul(matrix_const_gpu[y * 4 + x], state_gpu[basis[x]]));
            }
        }
        for (y = 0; y < 4; ++y) state_gpu[basis[y]] = d_buffer[y];
    }
}
/* Host driver for the 2-qubit dense gate: stages the 4x4 matrix in constant
 * memory, derives the ascending order of the two targets and launches the
 * kernel on the caller's stream. */
__host__ void double_qubit_dense_matrix_gate_host(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    const CPPCTYPE matrix[16], void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    checkCudaErrors(
        hipMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE) * 16, 0,
            hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    ITYPE quad_dim = dim >> 2;
    unsigned int n_threads = quad_dim <= 1024 ? quad_dim : 1024;
    unsigned int n_blocks = quad_dim / n_threads;
    // ascending order of the two targets, used for the bit-splitting step
    unsigned int small = std::min(target0_qubit_index, target1_qubit_index);
    unsigned int large = std::max(target0_qubit_index, target1_qubit_index);
    hipLaunchKernelGGL(( double_qubit_dense_matrix_gate_gpu), dim3(n_blocks), dim3(n_threads), 0, *cuda_stream,
        target0_qubit_index, target1_qubit_index, small, large, state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(hipGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// multi_qubit_PauliZ_gate
/* Negate every amplitude whose basis index has odd popcount against
 * phase_flip_mask (tensor product of Pauli-Z on the masked qubits).
 * One thread per basis state; launch with at least `dim` threads. */
__device__ void multi_qubit_Pauli_gate_Z_mask_device(
    ITYPE phase_flip_mask, GTYPE* state_gpu, ITYPE dim) {
    const ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dim) return;
    // parity of the masked bits decides the sign flip
    const UINT parity = __popcll(idx & phase_flip_mask) & 1;
    if (parity) {
        GTYPE amp = state_gpu[idx];
        state_gpu[idx] = make_cuDoubleComplex(-cuCreal(amp), -cuCimag(amp));
    }
}
/* Thin __global__ wrapper so the host driver can launch the device routine. */
__global__ void multi_qubit_Pauli_gate_Z_mask_gpu(
    ITYPE phase_flip_mask, GTYPE* state_gpu, ITYPE dim) {
    multi_qubit_Pauli_gate_Z_mask_device(phase_flip_mask, state_gpu, dim);
}
/* Host driver for the multi-qubit Pauli-Z gate: launches one thread per
 * basis state on the caller's stream and synchronizes. */
__host__ void multi_qubit_Pauli_gate_Z_mask_host(ITYPE phase_flip_mask,
    void* state, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    hipError_t cudaStatus;
    // one thread per basis state (dim is a power of two, so grid*block == dim)
    unsigned int block = dim <= 1024 ? dim : 1024;
    unsigned int grid = dim / block;
    hipLaunchKernelGGL(( multi_qubit_Pauli_gate_Z_mask_gpu), dim3(grid), dim3(block), 0, *cuda_stream,
        phase_flip_mask, state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    cudaStatus = hipGetLastError();
    checkCudaErrors(cudaStatus, __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
/* Multi-qubit Pauli gate with a non-empty X component. Each thread processes
 * one pair of basis states (basis_0, basis_1 = basis_0 ^ bit_flip_mask);
 * pivot_qubit_index must be a bit set in bit_flip_mask so each pair is
 * visited exactly once. The swapped amplitudes are multiplied by a power of
 * -i selected by global_phase_90rot_count and the Z-mask parity — see the
 * comment block above for how the masks are computed. */
__device__ void multi_qubit_Pauli_gate_XZ_mask_device(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, GTYPE* state_gpu, ITYPE dim) {
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    // loop variables
    const ITYPE loop_dim = dim >> 1;
    // powers of -i: (-i)^0, (-i)^1, (-i)^2, (-i)^3
    GTYPE PHASE_M90ROT[4] = {make_cuDoubleComplex(1.0, 0.0),
        make_cuDoubleComplex(0.0, -1), make_cuDoubleComplex(-1, 0.0),
        make_cuDoubleComplex(0.0, 1)};
    if (state_index < loop_dim) {
        // create base index
        ITYPE basis_0 =
            insert_zero_to_basis_index_device(state_index, pivot_qubit_index);
        // gather index
        ITYPE basis_1 = basis_0 ^ bit_flip_mask;
        // determine sign
        unsigned int sign_0 = __popcll(basis_0 & phase_flip_mask) & 1;
        unsigned int sign_1 = __popcll(basis_1 & phase_flip_mask) & 1;
        // fetch values
        GTYPE cval_0 = state_gpu[basis_0];
        GTYPE cval_1 = state_gpu[basis_1];
        // set values
        state_gpu[basis_0] = cuCmul(cval_1,
            PHASE_M90ROT[(global_phase_90rot_count + sign_0 * 2) &
                         3]);  // a % 4 = a & (4-1)
        state_gpu[basis_1] = cuCmul(cval_0,
            PHASE_M90ROT[(global_phase_90rot_count + sign_1 * 2) &
                         3]);  // a % 4 = a & (4-1)
    }
}
// Kernel entry point: delegates to the per-thread XZ-mask Pauli body.
__global__ void multi_qubit_Pauli_gate_XZ_mask_gpu(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, GTYPE* state_gpu, ITYPE dim) {
    multi_qubit_Pauli_gate_XZ_mask_device(bit_flip_mask, phase_flip_mask,
        global_phase_90rot_count, pivot_qubit_index, state_gpu, dim);
}
__host__ void multi_qubit_Pauli_gate_XZ_mask_host(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    // Launch the general (bit-flip + phase-flip) Pauli kernel on the given
    // stream and wait for completion.
    if (device_number != get_current_device()) hipSetDevice((int)device_number);
    hipStream_t* hip_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    // dim threads are launched even though the kernel only uses dim/2 of
    // them; its internal guard discards the excess.
    unsigned int n_threads = (dim <= 1024) ? dim : 1024;
    unsigned int n_blocks = dim / n_threads;
    hipLaunchKernelGGL((multi_qubit_Pauli_gate_XZ_mask_gpu), dim3(n_blocks),
        dim3(n_threads), 0, *hip_stream, bit_flip_mask, phase_flip_mask,
        global_phase_90rot_count, pivot_qubit_index, state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*hip_stream), __FILE__, __LINE__);
    checkCudaErrors(hipGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// Per-thread body of a multi-qubit Pauli rotation by `angle`. Each thread
// updates the amplitude pair (basis_0, basis_1 = basis_0 ^ bit_flip_mask):
//   new_0 = cos(angle/2)*a_0 + i*sin(angle/2) * (-i)^(rot+2*parity_0) * a_1
// and symmetrically for new_1, matching the commented formulas below, where
// rot = global_phase_90rot_count and parity_k is the popcount parity of
// basis_k under phase_flip_mask.
__device__ void multi_qubit_Pauli_rotation_gate_XZ_mask_device(
    ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, double angle, GTYPE* state_gpu, ITYPE dim) {
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    // loop variables: one thread per amplitude pair
    ITYPE loop_dim = dim >> 1;
    // coefs
    double cosval = cos(angle / 2);
    double sinval = sin(angle / 2);
    // GTYPE PHASE_90ROT[4] = {make_cuDoubleComplex(1.0,0.0),
    // make_cuDoubleComplex(0.0,1.0), make_cuDoubleComplex(-1.0,0.0),
    // make_cuDoubleComplex(0.0,-1.0)};
    // PHASE_M90ROT[k] == (-i)^k: {1, -i, -1, i}
    GTYPE PHASE_M90ROT[4] = {make_cuDoubleComplex(1.0, 0.0),
        make_cuDoubleComplex(0.0, -1), make_cuDoubleComplex(-1, 0.0),
        make_cuDoubleComplex(0.0, 1)};
    if (state_index < loop_dim) {
        // create base index
        ITYPE basis_0 =
            insert_zero_to_basis_index_device(state_index, pivot_qubit_index);
        // gather index
        ITYPE basis_1 = basis_0 ^ bit_flip_mask;
        // determine parity
        unsigned int bit_parity_0 = __popcll(basis_0 & phase_flip_mask) & 1;
        unsigned int bit_parity_1 = __popcll(basis_1 & phase_flip_mask) & 1;
        // fetch values
        GTYPE cval_0 = state_gpu[basis_0];
        GTYPE cval_1 = state_gpu[basis_1];
        // set values
        GTYPE tmp = cuCmul(make_cuDoubleComplex(sinval * cuCreal(cval_1),
                               sinval * cuCimag(cval_1)),
            PHASE_M90ROT[(global_phase_90rot_count + bit_parity_0 * 2) & 3]);
        // state[basis_0] = cuCmul(cosval, cval_0) + 1.i * sinval * cval_1 *
        // PHASE_M90ROT[ (global_phase_90rot_count + bit_parity_0*2)&3 ]; // % 4
        state_gpu[basis_0] =
            cuCadd(make_cuDoubleComplex(
                       cosval * cuCreal(cval_0), cosval * cuCimag(cval_0)),
                cuCmul(tmp, make_cuDoubleComplex(0.0, 1.0)));
        // state[basis_1] = cosval * cval_1 + 1.i * sinval * cval_0 *
        // PHASE_M90ROT[ (global_phase_90rot_count + bit_parity_1*2)&3 ]; // % 4
        tmp = cuCmul(make_cuDoubleComplex(
                         sinval * cuCreal(cval_0), sinval * cuCimag(cval_0)),
            PHASE_M90ROT[(global_phase_90rot_count + bit_parity_1 * 2) & 3]);
        state_gpu[basis_1] =
            cuCadd(make_cuDoubleComplex(
                       cosval * cuCreal(cval_1), cosval * cuCimag(cval_1)),
                cuCmul(tmp, make_cuDoubleComplex(0.0, 1.0)));  // % 4
    }
}
// Kernel entry point: delegates to the per-thread XZ-mask rotation body.
__global__ void multi_qubit_Pauli_rotation_gate_XZ_mask_gpu(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, double angle, GTYPE* state_gpu, ITYPE dim) {
    multi_qubit_Pauli_rotation_gate_XZ_mask_device(bit_flip_mask,
        phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle,
        state_gpu, dim);
}
__host__ void multi_qubit_Pauli_rotation_gate_XZ_mask_host(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, double angle, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    // Launch the general Pauli-rotation kernel on the given stream and wait.
    if (device_number != get_current_device()) hipSetDevice((int)device_number);
    hipStream_t* hip_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    // dim threads are launched even though the kernel only uses dim/2 of
    // them; its internal guard discards the excess.
    unsigned int n_threads = (dim <= 1024) ? dim : 1024;
    unsigned int n_blocks = dim / n_threads;
    hipLaunchKernelGGL((multi_qubit_Pauli_rotation_gate_XZ_mask_gpu),
        dim3(n_blocks), dim3(n_threads), 0, *hip_stream, bit_flip_mask,
        phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle,
        state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*hip_stream), __FILE__, __LINE__);
    checkCudaErrors(hipGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
__device__ void multi_qubit_Pauli_rotation_gate_Z_mask_device(
    ITYPE phase_flip_mask, double angle, GTYPE* state_gpu, ITYPE dim) {
    // One thread per amplitude: multiply it by the complex scalar
    // cos(angle/2) + i*sign*sin(angle/2), where the sign is fixed by the
    // parity of the phase-mask bits set in the basis index.
    const ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dim) return;
    const double c = cos(angle / 2);
    const double s = sin(angle / 2);
    // Even parity -> +1, odd parity -> -1 (same as 1 - 2*parity).
    const int sign = (__popcll(idx & phase_flip_mask) & 1) ? -1 : 1;
    state_gpu[idx] = cuCmul(state_gpu[idx], make_cuDoubleComplex(c, sign * s));
}
// Kernel entry point: delegates to the per-thread Z-mask rotation body.
__global__ void multi_qubit_Pauli_rotation_gate_Z_mask_gpu(
    ITYPE phase_flip_mask, double angle, GTYPE* state_gpu, ITYPE dim) {
    multi_qubit_Pauli_rotation_gate_Z_mask_device(
        phase_flip_mask, angle, state_gpu, dim);
}
__host__ void multi_qubit_Pauli_rotation_gate_Z_mask_host(ITYPE phase_flip_mask,
    double angle, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    // Launch the Z-mask rotation kernel on the given stream and wait for it.
    if (device_number != get_current_device()) hipSetDevice((int)device_number);
    hipStream_t* hip_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    // One thread per amplitude; assumes dim is a multiple of the block size.
    unsigned int n_threads = (dim <= 1024) ? dim : 1024;
    unsigned int n_blocks = dim / n_threads;
    hipLaunchKernelGGL((multi_qubit_Pauli_rotation_gate_Z_mask_gpu),
        dim3(n_blocks), dim3(n_threads), 0, *hip_stream, phase_flip_mask,
        angle, state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*hip_stream), __FILE__, __LINE__);
    checkCudaErrors(hipGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
__host__ void multi_qubit_Pauli_gate_partial_list_host(
    const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list,
    UINT target_qubit_index_count, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    // Decode the Pauli string on the listed qubits into flip/phase masks,
    // then dispatch to the Z-only or the general XZ implementation.
    ITYPE bit_flip_mask = 0, phase_flip_mask = 0;
    UINT global_phase_90rot_count = 0, pivot_qubit_index = 0;
    get_Pauli_masks_partial_list_gsim(target_qubit_index_list,
        Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask,
        &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
    if (bit_flip_mask != 0) {
        multi_qubit_Pauli_gate_XZ_mask_host(bit_flip_mask, phase_flip_mask,
            global_phase_90rot_count, pivot_qubit_index, state, dim, stream,
            device_number);
    } else {
        // No bit flips: the operator is diagonal, use the cheaper Z path.
        multi_qubit_Pauli_gate_Z_mask_host(
            phase_flip_mask, state, dim, stream, device_number);
    }
}
__host__ void multi_qubit_Pauli_gate_whole_list_host(
    const UINT* Pauli_operator_type_list, UINT qubit_count, void* state,
    ITYPE dim, void* stream, unsigned int device_number) {
    // Select the device, decode the whole-register Pauli string into masks,
    // then dispatch to the Z-only or the general XZ implementation.
    if (device_number != get_current_device()) hipSetDevice((int)device_number);
    ITYPE bit_flip_mask = 0, phase_flip_mask = 0;
    UINT global_phase_90rot_count = 0, pivot_qubit_index = 0;
    get_Pauli_masks_whole_list_gsim(Pauli_operator_type_list, qubit_count,
        &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count,
        &pivot_qubit_index);
    if (bit_flip_mask != 0) {
        multi_qubit_Pauli_gate_XZ_mask_host(bit_flip_mask, phase_flip_mask,
            global_phase_90rot_count, pivot_qubit_index, state, dim, stream,
            device_number);
    } else {
        // No bit flips: the operator is diagonal, use the cheaper Z path.
        multi_qubit_Pauli_gate_Z_mask_host(
            phase_flip_mask, state, dim, stream, device_number);
    }
}
__host__ void multi_qubit_Pauli_rotation_gate_partial_list_host(
    const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list,
    UINT target_qubit_index_count, double angle, void* state, ITYPE dim,
    void* stream, unsigned int device_number) {
    // Decode the Pauli string on the listed qubits into flip/phase masks,
    // then dispatch the rotation to the Z-only or the general XZ kernel.
    ITYPE bit_flip_mask = 0, phase_flip_mask = 0;
    UINT global_phase_90rot_count = 0, pivot_qubit_index = 0;
    get_Pauli_masks_partial_list_gsim(target_qubit_index_list,
        Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask,
        &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
    if (bit_flip_mask != 0) {
        multi_qubit_Pauli_rotation_gate_XZ_mask_host(bit_flip_mask,
            phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle,
            state, dim, stream, device_number);
    } else {
        // No bit flips: the rotation is diagonal, use the cheaper Z path.
        multi_qubit_Pauli_rotation_gate_Z_mask_host(
            phase_flip_mask, angle, state, dim, stream, device_number);
    }
}
__host__ void multi_qubit_Pauli_rotation_gate_whole_list_host(
    const UINT* Pauli_operator_type_list, UINT qubit_count, double angle,
    void* state, ITYPE dim, void* stream, unsigned int device_number) {
    // Decode the whole-register Pauli string into flip/phase masks, then
    // dispatch the rotation to the Z-only or the general XZ kernel.
    ITYPE bit_flip_mask = 0, phase_flip_mask = 0;
    UINT global_phase_90rot_count = 0, pivot_qubit_index = 0;
    get_Pauli_masks_whole_list_gsim(Pauli_operator_type_list, qubit_count,
        &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count,
        &pivot_qubit_index);
    if (bit_flip_mask != 0) {
        multi_qubit_Pauli_rotation_gate_XZ_mask_host(bit_flip_mask,
            phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle,
            state, dim, stream, device_number);
    } else {
        // No bit flips: the rotation is diagonal, use the cheaper Z path.
        multi_qubit_Pauli_rotation_gate_Z_mask_host(
            phase_flip_mask, angle, state, dim, stream, device_number);
    }
}
// target_qubit_count <= 5
// Shared-memory dense-matrix kernel for small target counts. Launch contract
// (set by multi_qubit_dense_matrix_gate_small_qubit_host): blockDim.y ==
// 2^target_qubit_index_count (one matrix row per y-thread), blockDim.x packs
// several amplitude groups per block with blockDim.x * blockDim.y <= 1024,
// and the matrix plus index lists live in the __constant__ symbols
// matrix_const_gpu, target_index_list_gpu, sorted_insert_index_list_gpu.
__global__ void multi_qubit_dense_matrix_gate_shared_gpu(
    UINT target_qubit_index_count, GTYPE* state_gpu, ITYPE dim) {
    __shared__ GTYPE state_basis[1024];  // one 2^t slice per x-thread
    GTYPE tmp = make_cuDoubleComplex(0.0, 0.0);
    ITYPE loop_dim = dim >> target_qubit_index_count;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    int j;
    ITYPE mat_len = 1ULL << target_qubit_index_count;
    if (basis < loop_dim) {
        // Expand the group index into a full basis index with zeros at the
        // target positions, then add this thread's y-offset bits.
        for (j = 0; j < target_qubit_index_count; ++j)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[j]);
        for (j = 0; j < target_qubit_index_count; ++j)
            basis +=
                (1ULL << target_index_list_gpu[j]) * ((threadIdx.y >> j) & 1);
        state_basis[(threadIdx.x << target_qubit_index_count) + threadIdx.y] =
            state_gpu[basis];
        // NOTE(review): this barrier sits inside a divergent branch; it is
        // safe only if every thread of a block passes the guard above, which
        // the host-side launch sizing appears to guarantee -- confirm if the
        // launch configuration ever changes.
        __syncthreads();
        // Row threadIdx.y of the matrix times the staged slice; the result
        // goes straight to global memory, so no second barrier is needed.
        for (j = 0; j < mat_len; ++j)
            tmp = cuCadd(tmp,
                cuCmul(
                    matrix_const_gpu[(threadIdx.y << target_qubit_index_count) +
                                     j],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        state_gpu[basis] = tmp;
    }
}
// target_qubit_count <= 10
// Same algorithm as the __constant__-matrix variant above, but the matrix is
// read from a global-memory buffer so it is not limited by constant-memory
// capacity. Same launch contract: blockDim.y == 2^target_qubit_index_count
// and blockDim.x * blockDim.y <= 1024.
__global__ void multi_qubit_dense_matrix_gate_shared_gpu(
    UINT target_qubit_index_count, GTYPE* matrix_gpu, GTYPE* state_gpu,
    ITYPE dim) {
    __shared__ GTYPE state_basis[1024];  // one 2^t slice per x-thread
    GTYPE tmp = make_cuDoubleComplex(0.0, 0.0);
    ITYPE loop_dim = dim >> target_qubit_index_count;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    int j;
    ITYPE mat_len = 1ULL << target_qubit_index_count;
    if (basis < loop_dim) {
        // Expand the group index into a full basis index with zeros at the
        // target positions, then add this thread's y-offset bits.
        for (j = 0; j < target_qubit_index_count; ++j)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[j]);
        for (j = 0; j < target_qubit_index_count; ++j)
            basis +=
                (1ULL << target_index_list_gpu[j]) * ((threadIdx.y >> j) & 1);
        state_basis[(threadIdx.x << target_qubit_index_count) + threadIdx.y] =
            state_gpu[basis];
        // NOTE(review): barrier inside a divergent branch; safe only when all
        // threads of a block pass the guard (see host launch sizing).
        __syncthreads();
        for (j = 0; j < mat_len; ++j)
            tmp = cuCadd(tmp,
                cuCmul(
                    matrix_gpu[(threadIdx.y << target_qubit_index_count) + j],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        state_gpu[basis] = tmp;
    }
}
// there is no atomicAdd
// target_qubit_index_count<=11
// Variant for t == 11 where 2^t slices exceed one thread per element: each
// (x, y) thread stages and produces TWO amplitudes, basis0 and basis1, which
// differ in the lowest sorted insert index. blockDim.y covers 2^(t-1) rows
// (set by multi_qubit_dense_matrix_gate_11qubit_host).
__global__ void multi_qubit_dense_matrix_gate_half_shared_gpu(
    UINT target_qubit_index_count, GTYPE* matrix_gpu, GTYPE* state_gpu,
    ITYPE dim) {
    __shared__ GTYPE state_basis[2048];  // one 2^t slice, two entries per thread
    ITYPE loop_dim = dim >> target_qubit_index_count;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    ITYPE basis0, basis1;
    ITYPE matrix_len = 1ULL << target_qubit_index_count;
    // ITYPE half_matrix_len = 1ULL << (target_qubit_index_count-1);
    if (basis < loop_dim) {
        // Expand the group index, then add the y-offsets for all targets but
        // the first sorted one; that remaining bit distinguishes basis0/basis1.
        for (int j = 0; j < target_qubit_index_count; ++j)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[j]);
        for (int j = 0; j < target_qubit_index_count - 1; ++j)
            basis += (1ULL << target_index_list_gpu[j + 1]) *
                     ((threadIdx.y >> j) & 1);
        basis0 = basis;
        basis1 = basis0 ^ (1ULL << sorted_insert_index_list_gpu[0]);
        state_basis[(threadIdx.x << target_qubit_index_count) +
                    (threadIdx.y << 1)] = state_gpu[basis0];
        state_basis[(threadIdx.x << target_qubit_index_count) +
                    (threadIdx.y << 1) + 1] = state_gpu[basis1];
        // NOTE(review): barrier inside a divergent branch; safe only when all
        // threads of a block pass the guard (see host launch sizing).
        __syncthreads();
        // Two matrix rows per thread: rows 2*y and 2*y+1.
        GTYPE d_buff = make_cuDoubleComplex(0.0, 0.0);
        for (int j = 0; j < matrix_len; ++j)
            d_buff = cuCadd(d_buff,
                cuCmul(matrix_gpu[((threadIdx.y << 1)
                                      << target_qubit_index_count) +
                                  j],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        state_gpu[basis0] = d_buff;
        d_buff = make_cuDoubleComplex(0.0, 0.0);
        for (int j = 0; j < matrix_len; ++j)
            d_buff = cuCadd(d_buff,
                cuCmul(matrix_gpu[(((threadIdx.y << 1) + 1)
                                      << target_qubit_index_count) +
                                  j],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        state_gpu[basis1] = d_buff;
        // printf("basis0: %d, basis1: %d\n", (int)basis0, (int)basis1);
    }
}
// Dense-matrix kernel for large target counts (used for t >= 12). The host
// zeroes state_gpu and passes the original amplitudes in state_gpu_copy; for
// t >= 11 the matrix is tiled into 1024-wide slices, each block computes a
// partial row sum over one slice, and the partials are accumulated into
// state_gpu with double atomics so multiple blocks may share an output row.
__global__ void multi_qubit_dense_matrix_gate_gpu(UINT target_qubit_index_count,
    GTYPE* matrix_gpu, GTYPE* state_gpu, GTYPE* state_gpu_copy, ITYPE dim) {
    __shared__ GTYPE state_basis[1024];
    ITYPE loop_dim = dim >> target_qubit_index_count;
    ITYPE large_block_index = 0;
    ITYPE large_block_residual = 0;
    ITYPE block_loop_dim = 1;  // target_qubit_index_count-3;
    ITYPE block_index = 0;
    ITYPE block_residual =
        0;  // block_loop_dim<=1 ? 0 : blockIdx.x % (1ULL<<block_loop_dim);
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    ITYPE assign_basis;
    ITYPE basis0;
    // For t >= 11, blockIdx.x encodes (amplitude group, output tile, input
    // tile); block_index selects the matrix-row tile, block_residual the
    // matrix-column tile this block multiplies.
    if (target_qubit_index_count >= 10 + 1) {
        block_loop_dim = 1ULL << (target_qubit_index_count - 10);
        large_block_index = blockIdx.x / (block_loop_dim * block_loop_dim);
        large_block_residual = blockIdx.x % (block_loop_dim * block_loop_dim);
        block_index = large_block_residual / block_loop_dim;
        block_residual = blockIdx.x % block_loop_dim;
        basis = large_block_index * blockDim.x + threadIdx.x;
    }
    ITYPE matrix_len = 1ULL << target_qubit_index_count;
    if (basis < loop_dim) {
        // Stage this block's 1024-entry input slice of the (copied) state.
        ITYPE tmp = (block_residual << 10) + threadIdx.y;
        for (int j = 0; j < target_qubit_index_count; ++j)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[j]);
        basis0 = basis;
        for (int j = 0; j < target_qubit_index_count; ++j)
            basis += (1ULL << target_index_list_gpu[j]) * ((tmp >> j) & 1);
        state_basis[(threadIdx.x << target_qubit_index_count) + threadIdx.y] =
            state_gpu_copy[basis];
        // Output index: the row tile (block_index) may differ from the input
        // column tile (block_residual) staged above.
        if (target_qubit_index_count >= 10 + 1) {
            tmp = (block_index << 10) + threadIdx.y;
            assign_basis = basis0;
            for (int j = 0; j < target_qubit_index_count; ++j)
                assign_basis +=
                    (1ULL << target_index_list_gpu[j]) * ((tmp >> j) & 1);
        } else {
            assign_basis = basis;
        }
        // NOTE(review): barrier inside a divergent branch; safe only when all
        // threads of a block pass the guard (see host launch sizing).
        __syncthreads();
        GTYPE d_buff = make_cuDoubleComplex(0.0, 0.0);
        ITYPE tmp_len = block_residual << 10;
        if (matrix_len > 1024) matrix_len = 1024;  // clamp to one tile width
        ITYPE row_index = (block_index << 10) + threadIdx.y;
        for (ITYPE j = 0; j < matrix_len; ++j)
            d_buff = cuCadd(d_buff,
                cuCmul(matrix_gpu[(row_index << target_qubit_index_count) + j +
                                  tmp_len],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        // Accumulate the partial row sum; state_gpu was zeroed by the host.
        atomicAdd_double_duplicate(&(state_gpu[assign_basis].x), d_buff.x);
        atomicAdd_double_duplicate(&(state_gpu[assign_basis].y), d_buff.y);
    }
}
__host__ void multi_qubit_dense_matrix_gate_small_qubit_host(
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    // Applies a dense (2^t x 2^t) matrix on t = target_qubit_index_count
    // target qubits with the shared-memory kernels. Intended for t <= 10:
    // t <= 5 stores the matrix in the matrix_const_gpu __constant__ symbol,
    // 6 <= t <= 10 stages it in a global-memory buffer. t > 10 is not
    // supported here (callers dispatch those sizes elsewhere).
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    hipError_t cudaStatus;
    // matrix dim, mask, buffer
    ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    // insert index
    UINT* h_sorted_insert_index_list = create_sorted_ui_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // loop variables
    ITYPE loop_dim = dim >> target_qubit_index_count;
    // Initialized so the cleanup path never frees an indeterminate pointer
    // (the original called hipFree on it unconditionally when t > 5).
    GTYPE* matrix_gpu = NULL;
    // block.y spans one matrix row (2^t); block.x packs as many amplitude
    // groups as fit into 1024 threads.
    unsigned int max_block_size = 1024 / matrix_dim;
    dim3 block;
    block.y = matrix_dim;
    block.x = loop_dim <= max_block_size ? loop_dim : max_block_size;
    unsigned int grid = loop_dim / block.x;
    if (target_qubit_index_count <= 5) {
        // Matrix and index lists all fit in __constant__ memory.
        checkCudaErrors(hipMemcpyToSymbolAsync(matrix_const_gpu, matrix,
                            sizeof(GTYPE) * matrix_dim * matrix_dim, 0,
                            hipMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        checkCudaErrors(hipMemcpyToSymbolAsync(target_index_list_gpu,
                            target_qubit_index_list,
                            sizeof(UINT) * target_qubit_index_count, 0,
                            hipMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        checkCudaErrors(hipMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
                            h_sorted_insert_index_list,
                            sizeof(UINT) * target_qubit_index_count, 0,
                            hipMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        hipLaunchKernelGGL((multi_qubit_dense_matrix_gate_shared_gpu),
            dim3(grid), dim3(block), 0, *cuda_stream, target_qubit_index_count,
            state_gpu, dim);
    } else if (target_qubit_index_count <= 10) {
        // Matrix is too large for __constant__ memory: stage it in global
        // memory and use the kernel overload taking a matrix pointer.
        checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&matrix_gpu),
                            matrix_dim * matrix_dim * sizeof(GTYPE)),
            __FILE__, __LINE__);
        checkCudaErrors(hipMemcpyAsync(matrix_gpu, matrix,
                            matrix_dim * matrix_dim * sizeof(GTYPE),
                            hipMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        checkCudaErrors(hipMemcpyToSymbolAsync(target_index_list_gpu,
                            target_qubit_index_list,
                            sizeof(UINT) * target_qubit_index_count, 0,
                            hipMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        checkCudaErrors(hipMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
                            h_sorted_insert_index_list,
                            sizeof(UINT) * target_qubit_index_count, 0,
                            hipMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        hipLaunchKernelGGL((multi_qubit_dense_matrix_gate_shared_gpu),
            dim3(grid), dim3(block), 0, *cuda_stream, target_qubit_index_count,
            matrix_gpu, state_gpu, dim);
    }
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    checkCudaErrors(cudaStatus, __FILE__, __LINE__);
    // Free the staging buffer only when it was actually allocated.
    if (matrix_gpu != NULL) hipFree(matrix_gpu);
    free(h_sorted_insert_index_list);
    state = reinterpret_cast<void*>(state_gpu);
}
// Driver for exactly 11 target qubits: uses the half-shared kernel, where
// each thread stages and produces two amplitudes, so block.y only needs to
// cover half a matrix row (2^10 = 1024 threads in y).
__host__ void multi_qubit_dense_matrix_gate_11qubit_host(
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    // hipError_t cudaStatus;
    // matrix dim, mask, buffer
    ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    UINT* h_sorted_insert_index_list = create_sorted_ui_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // loop variables
    // ITYPE loop_dim = dim >> target_qubit_index_count;
    GTYPE* matrix_gpu;
    // block.y = half a matrix row; block.x = amplitude groups per block.
    dim3 block;
    block.y = (matrix_dim >> 1) <= 1024 ? (matrix_dim >> 1) : 1024;
    unsigned int max_block_size = 1024 / block.y;
    block.x = dim / block.y <= max_block_size ? dim / block.y : max_block_size;
    unsigned int grid = dim / block.x / block.y;
    // Matrix (2^11 x 2^11) exceeds __constant__ memory: stage it globally.
    checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&matrix_gpu),
                        matrix_dim * matrix_dim * sizeof(GTYPE)),
        __FILE__, __LINE__);
    checkCudaErrors(hipMemcpyAsync(matrix_gpu, matrix,
                        matrix_dim * matrix_dim * sizeof(GTYPE),
                        hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        hipMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index_list,
            sizeof(UINT) * target_qubit_index_count, 0, hipMemcpyHostToDevice,
            *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        hipMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
            h_sorted_insert_index_list, sizeof(UINT) * target_qubit_index_count,
            0, hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    hipLaunchKernelGGL(( multi_qubit_dense_matrix_gate_half_shared_gpu), dim3(grid), dim3(block), 0,
        *cuda_stream, target_qubit_index_count, matrix_gpu, state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(hipGetLastError(), __FILE__, __LINE__);
    checkCudaErrors(hipFree(matrix_gpu), __FILE__, __LINE__);
    free((UINT*)h_sorted_insert_index_list);
    state = reinterpret_cast<void*>(state_gpu);
}
// Driver for more than 11 target qubits. The state is duplicated into
// state_gpu_copy, state_gpu is zeroed, and the tiled kernel accumulates
// partial matrix-vector products into state_gpu with atomics; for t >= 11 the
// grid is enlarged so that (input tile, output tile) pairs each get a block.
__host__ void multi_qubit_dense_matrix_gate_more_than_11qubit_host(
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    UINT device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    // hipError_t cudaStatus;
    // matrix dim, mask, buffer
    ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    UINT* h_sorted_insert_index_list = create_sorted_ui_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // loop variables
    ITYPE loop_dim = dim >> target_qubit_index_count;
    GTYPE* matrix_gpu;
    dim3 grid, block;
    block.y = matrix_dim <= (1ULL << 10) ? matrix_dim : (1ULL << 10);
    unsigned int max_block_size = (1ULL << 10) / block.y;
    block.x = dim / block.y <= max_block_size ? dim / block.y : max_block_size;
    grid.x = dim / block.x / block.y;
    // One block per (amplitude group, row tile, column tile) triple.
    if (target_qubit_index_count >= 10 + 1)
        grid.x = (1ULL << ((target_qubit_index_count - 10) << 1)) * loop_dim;
    GTYPE* state_gpu_copy;
    checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&matrix_gpu),
                        matrix_dim * matrix_dim * sizeof(GTYPE)),
        __FILE__, __LINE__);
    checkCudaErrors(hipMemcpyAsync(matrix_gpu, matrix,
                        matrix_dim * matrix_dim * sizeof(GTYPE),
                        hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        hipMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index_list,
            sizeof(UINT) * target_qubit_index_count, 0, hipMemcpyHostToDevice,
            *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        hipMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
            h_sorted_insert_index_list, sizeof(UINT) * target_qubit_index_count,
            0, hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    // Snapshot the input amplitudes, then zero the output so the kernel can
    // atomically accumulate partial sums into state_gpu.
    checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&state_gpu_copy),
                        dim * sizeof(GTYPE)),
        __FILE__, __LINE__);
    checkCudaErrors(
        hipMemcpyAsync(state_gpu_copy, state_gpu, dim * sizeof(GTYPE),
            hipMemcpyDeviceToDevice, *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        hipMemsetAsync(state_gpu, 0, dim * sizeof(GTYPE), *cuda_stream),
        __FILE__, __LINE__);
    hipLaunchKernelGGL(( multi_qubit_dense_matrix_gate_gpu), dim3(grid), dim3(block), 0, *cuda_stream,
        target_qubit_index_count, matrix_gpu, state_gpu, state_gpu_copy, dim);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(hipGetLastError(), __FILE__, __LINE__);
    hipFree(state_gpu_copy);
    hipFree(matrix_gpu);
    free((UINT*)h_sorted_insert_index_list);
    state = reinterpret_cast<void*>(state_gpu);
}
__host__ void multi_qubit_dense_matrix_gate_host(
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    // Dispatch on the number of target qubits to the specialized driver:
    // 1-4 have dedicated implementations, 11 and >= 12 need the large-matrix
    // paths, and everything else (5..10, including 0) uses the shared-memory
    // small-qubit driver.
    switch (target_qubit_index_count) {
        case 1:
            single_qubit_dense_matrix_gate_host(target_qubit_index_list[0],
                matrix, state, dim, stream, device_number);
            return;
        case 2:
            double_qubit_dense_matrix_gate_host(target_qubit_index_list[0],
                target_qubit_index_list[1], matrix, state, dim, stream,
                device_number);
            return;
        case 3:
            triple_qubit_dense_matrix_gate_host(target_qubit_index_list[0],
                target_qubit_index_list[1], target_qubit_index_list[2], matrix,
                state, dim, stream, device_number);
            return;
        case 4: {
            // Copy the four indices into a mutable array (the quad driver
            // presumably takes a non-const UINT* -- kept from the original).
            UINT copied_targets[4];
            for (int i = 0; i < 4; ++i)
                copied_targets[i] = target_qubit_index_list[i];
            quad_qubit_dense_matrix_gate_host(
                copied_targets, matrix, state, dim, stream, device_number);
            return;
        }
        case 11:
            multi_qubit_dense_matrix_gate_11qubit_host(target_qubit_index_list,
                target_qubit_index_count, matrix, state, dim, stream,
                device_number);
            return;
        default:
            if (target_qubit_index_count >= 12) {
                multi_qubit_dense_matrix_gate_more_than_11qubit_host(
                    target_qubit_index_list, target_qubit_index_count, matrix,
                    state, dim, stream, device_number);
            } else {
                multi_qubit_dense_matrix_gate_small_qubit_host(
                    target_qubit_index_list, target_qubit_index_count, matrix,
                    state, dim, stream, device_number);
            }
            return;
    }
}
// target_qubit_index_count <= 5
// Controlled dense-matrix kernel with the matrix in matrix_const_gpu. One
// thread per group of 2^t amplitudes (all with the control bit set to
// control_value): it gathers them via matrix_mask_list_gpu, performs the full
// matrix-vector product into a per-thread buffer, and scatters back.
// d_buffer is 1024 GTYPE (16 KB) of per-thread storage, likely held in local
// memory for larger t.
__global__ void single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(
    UINT control_qubit_index, UINT control_value, UINT target_qubit_index_count,
    GTYPE* state, ITYPE dim) {
    // control mask: the control bit, or 0 when control_value == 0
    const ITYPE control_mask = (1ULL << control_qubit_index) * control_value;
    const UINT insert_index_count = target_qubit_index_count + 1;
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    // loop variables: one thread per amplitude group
    const ITYPE loop_dim = dim >> insert_index_count;
    GTYPE d_buffer[1024];
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (state_index < loop_dim) {
        // create base index (zeros at all target positions and the control)
        ITYPE basis_0 = state_index;
        for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
            UINT insert_index = sorted_insert_index_list_gpu[cursor];
            basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index);
        }
        // flip control so the group lies in the controlled subspace
        basis_0 ^= control_mask;
        // compute matrix mul
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            d_buffer[y] = make_cuDoubleComplex(0.0, 0.0);
            for (ITYPE x = 0; x < matrix_dim; ++x) {
                d_buffer[y] = cuCadd(
                    d_buffer[y], cuCmul(matrix_const_gpu[y * matrix_dim + x],
                                     state[basis_0 ^ matrix_mask_list_gpu[x]]));
            }
        }
        // set result
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y];
        }
    }
}
// target_qubit_index_count <= 10
// Same controlled dense-matrix kernel as above, but the matrix is read from a
// global-memory buffer instead of the matrix_const_gpu __constant__ symbol.
// Note it still reads matrix_mask_list_gpu from __constant__ memory.
__global__ void single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(
    UINT control_qubit_index, UINT control_value, UINT target_qubit_index_count,
    const GTYPE* matrix, GTYPE* state, ITYPE dim) {
    // control mask: the control bit, or 0 when control_value == 0
    const ITYPE control_mask = (1ULL << control_qubit_index) * control_value;
    const UINT insert_index_count = target_qubit_index_count + 1;
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    // loop variables: one thread per amplitude group
    const ITYPE loop_dim = dim >> insert_index_count;
    GTYPE d_buffer[1024];
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (state_index < loop_dim) {
        // create base index (zeros at all target positions and the control)
        ITYPE basis_0 = state_index;
        for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
            UINT insert_index = sorted_insert_index_list_gpu[cursor];
            basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index);
        }
        // flip control so the group lies in the controlled subspace
        basis_0 ^= control_mask;
        // compute matrix mul
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            d_buffer[y] = make_cuDoubleComplex(0.0, 0.0);
            for (ITYPE x = 0; x < matrix_dim; ++x) {
                d_buffer[y] = cuCadd(
                    d_buffer[y], cuCmul(matrix[y * matrix_dim + x],
                                     state[basis_0 ^ matrix_mask_list_gpu[x]]));
            }
        }
        // set result
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y];
        }
    }
}
__host__ void single_qubit_control_multi_qubit_dense_matrix_gate_host(
    UINT control_qubit_index, UINT control_value,
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    // Applies a dense (2^t x 2^t) matrix on the target qubits, conditioned on
    // a single control qubit equal to control_value. Supports at most 10
    // targets: t <= 5 places the matrix in __constant__ memory, 6..10 stages
    // it in a global-memory buffer.
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    hipError_t cudaStatus;
    // matrix dim, mask, buffer
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    ITYPE* matrix_mask_list = create_matrix_mask_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // insert list: the target qubits plus the control qubit
    const UINT insert_index_count = target_qubit_index_count + 1;
    UINT* sorted_insert_index_list = create_sorted_ui_list_value_gsim(
        target_qubit_index_list, target_qubit_index_count, control_qubit_index);
    // Initialized so cleanup never frees indeterminate pointers; the mask
    // buffer holds ITYPE masks (it was mistyped GTYPE* before).
    GTYPE* d_matrix = NULL;
    ITYPE* d_matrix_mask_list = NULL;
    // loop variables
    const ITYPE loop_dim = dim >> insert_index_count;
    unsigned int block = loop_dim <= 1024 ? loop_dim : 1024;
    unsigned int grid = loop_dim / block;
    if (target_qubit_index_count <= 10) {
        if (target_qubit_index_count >= 3) {
            // Shrink the block for larger t: each thread's in-kernel scratch
            // work grows with 2^t, so fewer threads per block are used.
            unsigned int tmp_block = 1ULL << (13 - target_qubit_index_count);
            block = loop_dim <= tmp_block ? loop_dim : tmp_block;
        } else {
            block = loop_dim <= 1024 ? loop_dim : 1024;
        }
        grid = loop_dim / block;
        if (target_qubit_index_count <= 5) {
            // Matrix, mask list and insert list all fit in __constant__ memory.
            checkCudaErrors(hipMemcpyToSymbolAsync(matrix_const_gpu, matrix,
                                sizeof(GTYPE) * matrix_dim * matrix_dim, 0,
                                hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            checkCudaErrors(hipMemcpyToSymbolAsync(matrix_mask_list_gpu,
                                matrix_mask_list, sizeof(ITYPE) * matrix_dim, 0,
                                hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            checkCudaErrors(
                hipMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
                    sorted_insert_index_list,
                    sizeof(UINT) * (target_qubit_index_count + 1), 0,
                    hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            hipLaunchKernelGGL(
                (single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu),
                dim3(grid), dim3(block), 0, *cuda_stream, control_qubit_index,
                control_value, target_qubit_index_count, state_gpu, dim);
        } else {
            checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_matrix),
                                matrix_dim * matrix_dim * sizeof(GTYPE)),
                __FILE__, __LINE__);
            checkCudaErrors(hipMemcpyAsync(d_matrix, matrix,
                                matrix_dim * matrix_dim * sizeof(GTYPE),
                                hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            // Fixed: the mask buffer holds matrix_dim ITYPE entries; the
            // original over-allocated matrix_dim * matrix_dim * sizeof(GTYPE).
            checkCudaErrors(
                hipMalloc(reinterpret_cast<void**>(&d_matrix_mask_list),
                    matrix_dim * sizeof(ITYPE)),
                __FILE__, __LINE__);
            checkCudaErrors(hipMemcpyAsync(d_matrix_mask_list,
                                matrix_mask_list, sizeof(ITYPE) * matrix_dim,
                                hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            // NOTE(review): d_matrix_mask_list is filled but never handed to
            // the kernel below, which reads the matrix_mask_list_gpu
            // __constant__ symbol that this branch does not update. Verify
            // upstream whether a symbol copy is missing here.
            checkCudaErrors(
                hipMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
                    sorted_insert_index_list,
                    sizeof(UINT) * (target_qubit_index_count + 1), 0,
                    hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            hipLaunchKernelGGL(
                (single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu),
                dim3(grid), dim3(block), 0, *cuda_stream, control_qubit_index,
                control_value, target_qubit_index_count, d_matrix, state_gpu,
                dim);
        }
    } else {
        printf("The max number of targets is limited to 10.");
        assert(0);
    }
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    checkCudaErrors(cudaStatus, __FILE__, __LINE__);
    if (target_qubit_index_count > 5) {
        hipFree(d_matrix);
        hipFree(d_matrix_mask_list);
    }
    free(sorted_insert_index_list);
    free(matrix_mask_list);
    state = reinterpret_cast<void*>(state_gpu);
}
// target_qubit_index_count <= 5
// Multi-control variant of the controlled dense-matrix kernel; the caller
// pre-combines all control bits/values into control_mask, and the insert
// list covers the targets plus every control qubit. Matrix and mask list are
// read from __constant__ memory.
__global__ void multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(
    ITYPE control_mask, UINT target_qubit_index_count,
    ITYPE control_qubit_index_count, GTYPE* state, ITYPE dim) {
    // control mask
    const UINT insert_index_count =
        target_qubit_index_count + control_qubit_index_count;
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    // loop variables: one thread per amplitude group
    const ITYPE loop_dim = dim >> insert_index_count;
    GTYPE d_buffer[1024];  // per-thread scratch for one matrix-vector product
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (state_index < loop_dim) {
        // create base index (zeros at all target and control positions)
        ITYPE basis_0 = state_index;
        for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
            UINT insert_index = sorted_insert_index_list_gpu[cursor];
            basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index);
        }
        // flip control
        basis_0 ^= control_mask;
        // compute matrix mul
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            d_buffer[y] = make_cuDoubleComplex(0.0, 0.0);
            for (ITYPE x = 0; x < matrix_dim; ++x) {
                d_buffer[y] = cuCadd(
                    d_buffer[y], cuCmul(matrix_const_gpu[y * matrix_dim + x],
                                     state[basis_0 ^ matrix_mask_list_gpu[x]]));
            }
        }
        // set result
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y];
        }
    }
}
// Overload for 6..10 target qubits: the dense matrix is read from global
// memory instead of the constant buffer (it no longer fits there); the mask
// list and sorted insert-index list still come from constant symbols.
__global__ void multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(
    ITYPE control_mask, UINT target_qubit_index_count,
    ITYPE control_qubit_index_count, const GTYPE* matrix, GTYPE* state,
    ITYPE dim) {
    const UINT num_insert =
        target_qubit_index_count + control_qubit_index_count;
    const ITYPE mat_dim = 1ULL << target_qubit_index_count;
    const ITYPE outer_dim = dim >> num_insert;
    // per-thread accumulator; mat_dim <= 1024 so this always fits
    GTYPE acc[1024];
    const ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= outer_dim) return;
    // rebuild the base basis index by inserting a zero bit at every
    // target/control position (ascending order required)
    ITYPE base = idx;
    for (UINT c = 0; c < num_insert; ++c) {
        base = insert_zero_to_basis_index_device(
            base, sorted_insert_index_list_gpu[c]);
    }
    // pick the branch where the control qubits carry their demanded values
    base ^= control_mask;
    // dense matrix-vector product on this amplitude group
    for (ITYPE row = 0; row < mat_dim; ++row) {
        GTYPE sum = make_cuDoubleComplex(0.0, 0.0);
        for (ITYPE col = 0; col < mat_dim; ++col) {
            sum = cuCadd(sum, cuCmul(matrix[row * mat_dim + col],
                                  state[base ^ matrix_mask_list_gpu[col]]));
        }
        acc[row] = sum;
    }
    for (ITYPE row = 0; row < mat_dim; ++row) {
        state[base ^ matrix_mask_list_gpu[row]] = acc[row];
    }
}
// Applies a dense matrix on up to 10 target qubits, conditioned on a set of
// control qubits. For <=5 targets the (at most 32x32) matrix and the mask
// list are staged in constant memory; for 6..10 targets the matrix goes to a
// temporary global-memory buffer while the mask list still fits in the
// constant symbol matrix_mask_list_gpu (matrix_dim <= 1024 entries).
__host__ void multi_qubit_control_multi_qubit_dense_matrix_gate_host(
    const UINT* control_qubit_index_list, const UINT* control_value_list,
    UINT control_qubit_index_count, const UINT* target_qubit_index_list,
    UINT target_qubit_index_count, const CPPCTYPE* matrix, void* state,
    ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice((int)device_number);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    hipError_t cudaStatus;
    // matrix dimension and per-column bit masks for the target qubits
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    ITYPE* matrix_mask_list = create_matrix_mask_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // ascending list of all target+control indices, used for zero-insertion
    UINT* sorted_insert_index_list = create_sorted_ui_list_list_gsim(
        target_qubit_index_list, target_qubit_index_count,
        control_qubit_index_list, control_qubit_index_count);
    // control mask
    ITYPE control_mask = create_control_mask_gsim(control_qubit_index_list,
        control_value_list, control_qubit_index_count);
    // loop variables
    const ITYPE loop_dim =
        dim >> (target_qubit_index_count + control_qubit_index_count);
    // BUGFIX: the old d_matrix_mask_list buffer has been removed. Both
    // kernel overloads read the mask list from the constant symbol
    // matrix_mask_list_gpu, so the previous GTYPE-typed global buffer
    // (allocated with the wrong size, matrix_dim^2 * sizeof(GTYPE) instead
    // of matrix_dim * sizeof(ITYPE)) was never read by the kernel and left
    // the symbol uninitialized in the >5-target branch.
    GTYPE* d_matrix = NULL;
    unsigned int block = loop_dim <= 1024 ? loop_dim : 1024;
    unsigned int grid = loop_dim / block;
    if (target_qubit_index_count <= 10) {
        if (target_qubit_index_count >= 3) {
            // shrink the block as the per-thread workload grows
            unsigned int tmp_block = 1ULL << (13 - target_qubit_index_count);
            block = loop_dim <= tmp_block ? loop_dim : tmp_block;
        } else {
            block = loop_dim <= 1024 ? loop_dim : 1024;
        }
        grid = loop_dim / block;
        if (target_qubit_index_count <= 5) {
            checkCudaErrors(hipMemcpyToSymbolAsync(matrix_const_gpu, matrix,
                                sizeof(GTYPE) * matrix_dim * matrix_dim, 0,
                                hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            checkCudaErrors(hipMemcpyToSymbolAsync(matrix_mask_list_gpu,
                                matrix_mask_list, sizeof(ITYPE) * matrix_dim, 0,
                                hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            checkCudaErrors(
                hipMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
                    sorted_insert_index_list,
                    sizeof(UINT) *
                        (target_qubit_index_count + control_qubit_index_count),
                    0, hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            hipLaunchKernelGGL(( multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu), dim3(grid),
                dim3(block), 0, *cuda_stream, control_mask,
                target_qubit_index_count, control_qubit_index_count, state_gpu,
                dim);
        } else {
            checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_matrix),
                                matrix_dim * matrix_dim * sizeof(GTYPE)),
                __FILE__, __LINE__);
            // NOTE(review): assumes CPPCTYPE and GTYPE are layout-compatible
            // complex doubles -- the same raw copy is done in the <=5 branch.
            checkCudaErrors(hipMemcpyAsync(d_matrix, matrix,
                                matrix_dim * matrix_dim * sizeof(GTYPE),
                                hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            // BUGFIX: copy the mask list into the constant symbol that the
            // kernel actually reads (matrix_dim <= 1024 entries fit exactly).
            checkCudaErrors(hipMemcpyToSymbolAsync(matrix_mask_list_gpu,
                                matrix_mask_list, sizeof(ITYPE) * matrix_dim, 0,
                                hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            checkCudaErrors(
                hipMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
                    sorted_insert_index_list,
                    sizeof(UINT) *
                        (target_qubit_index_count + control_qubit_index_count),
                    0, hipMemcpyHostToDevice, *cuda_stream),
                __FILE__, __LINE__);
            hipLaunchKernelGGL(( multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu), dim3(grid),
                dim3(block), 0, *cuda_stream, control_mask,
                target_qubit_index_count, control_qubit_index_count, d_matrix,
                state_gpu, dim);
        }
    } else {
        printf("The max number of targets is limited to 10.");
        assert(0);
    }
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    checkCudaErrors(cudaStatus, __FILE__, __LINE__);
    if (target_qubit_index_count > 5) {
        hipFree(d_matrix);
    }
    free(sorted_insert_index_list);
    free(matrix_mask_list);
    state = reinterpret_cast<void*>(state_gpu);
}
// Diagonal gate kernel, constant-memory variant (dim <= 1024 entries of
// matrix_const_gpu). Each thread scales exactly one amplitude.
__global__ void multi_qubit_diagonal_matrix_gate_gpu(
    GTYPE* state_gpu, ITYPE dim) {
    const ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dim) return;
    state_gpu[idx] = cuCmul(matrix_const_gpu[idx], state_gpu[idx]);
}
// Diagonal gate kernel, global-memory variant (diagonal too large for the
// constant buffer). Each thread scales exactly one amplitude.
__global__ void multi_qubit_diagonal_matrix_gate_gpu(
    GTYPE* matrix_gpu, GTYPE* state_gpu, ITYPE dim) {
    const ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dim) return;
    state_gpu[idx] = cuCmul(matrix_gpu[idx], state_gpu[idx]);
}
// Applies a diagonal-matrix gate whose dim entries fit in the constant
// buffer matrix_const_gpu (the dispatcher guarantees dim <= 1024).
__host__ void multi_qubit_diagonal_matrix_gate_with_constant_memory_host(
    const CPPCTYPE* diagonal_matrix, void* state, ITYPE dim, void* stream,
    UINT device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice(device_number);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    // Consistency fix: stage the symbol copy asynchronously on the caller's
    // stream like every other wrapper in this file. The kernel launch on the
    // same stream preserves ordering, and the stream synchronize below still
    // blocks before returning, so the host buffer outlives the copy.
    checkCudaErrors(
        hipMemcpyToSymbolAsync(matrix_const_gpu, diagonal_matrix,
            sizeof(GTYPE) * dim, 0, hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    unsigned int block = dim <= 1024 ? dim : 1024;
    unsigned int grid = dim / block;
    hipLaunchKernelGGL(( multi_qubit_diagonal_matrix_gate_gpu), dim3(grid), dim3(block), 0, *cuda_stream,
        state_gpu, dim);
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(hipGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// Applies a diagonal-matrix gate whose entries are staged in a temporary
// global-memory buffer (used when dim exceeds the constant-memory capacity).
__host__ void multi_qubit_diagonal_matrix_gate_with_global_memory_host(
    const CPPCTYPE* diagonal_matrix, void* state, ITYPE dim, void* stream,
    UINT device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) hipSetDevice(device_number);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    hipStream_t* cuda_stream = reinterpret_cast<hipStream_t*>(stream);
    GTYPE* d_matrix;
    checkCudaErrors(
        hipMalloc((void**)&d_matrix, sizeof(GTYPE) * dim), __FILE__, __LINE__);
    checkCudaErrors(
        hipMemcpyAsync(d_matrix, diagonal_matrix, sizeof(GTYPE) * dim,
            hipMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    const unsigned int block = dim <= 1024 ? dim : 1024;
    const unsigned int grid = dim / block;
    hipLaunchKernelGGL(( multi_qubit_diagonal_matrix_gate_gpu), dim3(grid), dim3(block), 0, *cuda_stream,
        d_matrix, state_gpu, dim);
    // Block until the kernel finished so the temporary can be freed safely.
    checkCudaErrors(hipStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(hipGetLastError(), __FILE__, __LINE__);
    hipFree(d_matrix);
    state = reinterpret_cast<void*>(state_gpu);
}
// Dispatcher: matrix_const_gpu holds at most 1024 complex entries, so small
// diagonals use constant memory and larger ones fall back to global memory.
__host__ void multi_qubit_diagonal_matrix_gate_host(
    const CPPCTYPE* diagonal_matrix, void* state, ITYPE dim, void* stream,
    UINT device_number) {
    const bool fits_in_constant_memory = (dim <= 1024);
    if (fits_in_constant_memory) {
        multi_qubit_diagonal_matrix_gate_with_constant_memory_host(
            diagonal_matrix, state, dim, stream, device_number);
    } else {
        multi_qubit_diagonal_matrix_gate_with_global_memory_host(
            diagonal_matrix, state, dim, stream, device_number);
    }
}
| 11ee786f6f55dd4d188b242c359a51d54e788cd3.cu | #include <assert.h>
#include <cuComplex.h>
#include <cublas_v2.h>
#include <stdio.h>
#include <algorithm>
#include <functional>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "update_ops_cuda.h"
#include "util.cuh"
#include "util_func.h"
#include "util_type.h"
#include "util_type_internal.h"
// maximum # of GTYPE elements allocating on constant memory: 4096
__constant__ GTYPE matrix_const_gpu[1024];
__constant__ ITYPE matrix_mask_list_gpu[1024];
__constant__ UINT sorted_insert_index_list_gpu[15];
__constant__ UINT target_index_list_gpu[15];
/** Ported from vqcsim.
* perform multi_qubit_Pauli_gate with XZ mask.
*
* This function assumes bit_flip_mask is not 0, i.e., at least one bit is
* flipped. If no bit is flipped, use multi_qubit_Pauli_gate_Z_mask. This
* function update the quantum state with Pauli operation. bit_flip_mask,
* phase_flip_mask, global_phase_90rot_count, and pivot_qubit_index must be
* computed before calling this function. See get_masks_from_*_list for the
* above four arguemnts.
*/
// void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE
// phase_flip_mask, UINT global_phase_90rot_count,UINT pivot_qubit_index,
// CPPCTYPE* state, ITYPE dim); void
// multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE
// phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index,
// double angle, CPPCTYPE* state, ITYPE dim); void
// multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CPPCTYPE* state, ITYPE
// dim); void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask,
// double angle, CPPCTYPE* state, ITYPE dim);
// CAS-loop emulation of atomicAdd for double (for devices/toolkits without
// native double atomicAdd). Returns the value stored before the addition.
__device__ double atomicAdd_double_duplicate(double* address, double val) {
    unsigned long long int* addr =
        reinterpret_cast<unsigned long long int*>(address);
    unsigned long long int expected = *addr;
    unsigned long long int observed;
    do {
        observed = expected;
        // retry until no other thread modified *address in between
        expected = atomicCAS(addr, observed,
            __double_as_longlong(val + __longlong_as_double(observed)));
    } while (observed != expected);
    return __longlong_as_double(expected);
}
// 5-qubit (32x32) dense matrix gate using shared memory.
// Launch contract (see penta_qubit_dense_matrix_gate_host): blockDim.y == 32
// (one thread per output amplitude of a group), blockDim.x packs up to 32
// groups per block; sorted_insert_index_list_gpu holds the 5 target indices
// in ascending order and target_index_list_gpu in caller order; the 32x32
// matrix is in matrix_const_gpu.
// NOTE(review): the __syncthreads() sits inside the `basis < loop_dim`
// guard; this is only safe because dim is a power of two so the whole block
// takes the same branch -- confirm before changing launch geometry.
__global__ void penta_qubit_dense_matrix_gate_gpu(GTYPE* state_gpu, ITYPE dim) {
    __shared__ GTYPE state_basis[1024];
    GTYPE tmp = make_cuDoubleComplex(0.0, 0.0);
    ITYPE loop_dim = dim >> 5;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    int y;
    if (basis < loop_dim) {
        // rebuild the base index by inserting zeros at the sorted targets
        for (y = 0; y < 5; ++y)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[y]);
        // bit y of threadIdx.y selects the value of target qubit y
        for (y = 0; y < 5; ++y)
            basis +=
                (1ULL << target_index_list_gpu[y]) * ((threadIdx.y >> y) & 1);
        // threads sharing threadIdx.x cooperatively load the 32 amplitudes
        state_basis[(threadIdx.x << 5) + threadIdx.y] = state_gpu[basis];
        __syncthreads();
        // row threadIdx.y of the matrix times the loaded amplitude vector
        for (y = 0; y < 32; ++y)
            tmp = cuCadd(tmp, cuCmul(matrix_const_gpu[(threadIdx.y << 5) + y],
                                state_basis[(threadIdx.x << 5) + y]));
        state_gpu[basis] = tmp;
    }
}
// Host wrapper for the 5-qubit (32x32) dense matrix gate. Stages the matrix
// and both target-index lists in constant memory, then launches a 2D block:
// y indexes the 32 amplitudes of one group, x packs groups.
__host__ void penta_qubit_dense_matrix_gate_host(
    const unsigned int target_qubit_index[5], const CPPCTYPE matrix[1024],
    void* state, ITYPE dim, void* stream, UINT device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE) * 1024,
            0, cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    const ITYPE loop_dim = dim >> 5;
    dim3 block;
    block.y = 32;
    block.x = loop_dim <= 32 ? (unsigned int)loop_dim : 32;
    const unsigned int grid = loop_dim / block.x;
    // caller-ordered targets (matrix bit order) ...
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index,
            sizeof(UINT) * 5, 0, cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    // ... plus an ascending copy for the zero-insertion inside the kernel
    unsigned int sorted_targets[5];
    memcpy(sorted_targets, target_qubit_index, sizeof(unsigned int) * 5);
    std::sort(sorted_targets, sorted_targets + 5);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, sorted_targets,
            sizeof(UINT) * 5, 0, cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    penta_qubit_dense_matrix_gate_gpu<<<grid, block, 0, *cuda_stream>>>(
        state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// Shared-memory variant of the 4-qubit dense matrix gate (currently unused;
// the host wrapper launches the register-based kernel and keeps the call to
// this one commented out). Contract mirrors the penta kernel: blockDim.y ==
// 16, sorted_insert_index_list_gpu holds the 4 target indices ascending,
// target_index_list_gpu holds them in caller order, matrix in
// matrix_const_gpu.
// NOTE(review): a host enabling this kernel must populate
// target_index_list_gpu too -- the old (commented) launcher only set
// sorted_insert_index_list_gpu. As in the penta kernel, the __syncthreads()
// inside the guard is safe only because dim is a power of two.
__global__ void quad_qubit_dense_matrix_gate_shared_gpu(
    GTYPE* state_gpu, ITYPE dim) {
    __shared__ GTYPE state_basis[1024];
    GTYPE tmp = make_cuDoubleComplex(0.0, 0.0);
    ITYPE loop_dim = dim >> 4;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    int y;
    if (basis < loop_dim) {
        for (y = 0; y < 4; ++y)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[y]);
        // BUGFIX: offsets must use the caller-ordered target list (bit y of
        // threadIdx.y corresponds to target y), as in the penta kernel; the
        // old code reused the sorted list.
        for (y = 0; y < 4; ++y)
            basis +=
                (1ULL << target_index_list_gpu[y]) * ((threadIdx.y >> y) & 1);
        // BUGFIX: each thread loads its own slot (threadIdx.y); the old code
        // wrote slot y, which equals 4 for every thread after the loop.
        state_basis[(threadIdx.x << 4) + threadIdx.y] = state_gpu[basis];
        __syncthreads();
        // BUGFIX: matrix row index is threadIdx.y, column index is y; the
        // old code had the two swapped in the shared-memory read.
        for (y = 0; y < 16; ++y)
            tmp = cuCadd(tmp, cuCmul(matrix_const_gpu[(threadIdx.y << 4) + y],
                                state_basis[(threadIdx.x << 4) + y]));
        state_gpu[basis] = tmp;
    }
}
// 4-qubit (16x16) dense matrix gate, register-based variant: one thread
// processes all 16 amplitudes of a group. targetN_qubit_index are in caller
// order (bit k of the local index maps to target k); sorted_indexN are the
// same indices ascending, used for zero-insertion.
__global__ void quad_qubit_dense_matrix_gate_gpu(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    unsigned int target2_qubit_index, unsigned int target3_qubit_index,
    unsigned int sorted_index0, unsigned int sorted_index1,
    unsigned int sorted_index2, unsigned int sorted_index3, GTYPE* state_gpu,
    ITYPE dim) {
    const ITYPE loop_dim = dim >> 4;
    const ITYPE group = blockIdx.x * blockDim.x + threadIdx.x;
    if (group >= loop_dim) return;
    // rebuild the base index: insert a zero bit at each target, ascending
    ITYPE basis0 = group;
    basis0 = insert_zero_to_basis_index_device(basis0, sorted_index0);
    basis0 = insert_zero_to_basis_index_device(basis0, sorted_index1);
    basis0 = insert_zero_to_basis_index_device(basis0, sorted_index2);
    basis0 = insert_zero_to_basis_index_device(basis0, sorted_index3);
    const ITYPE offs[4] = {1ULL << target0_qubit_index,
        1ULL << target1_qubit_index, 1ULL << target2_qubit_index,
        1ULL << target3_qubit_index};
    // enumerate the 16 basis indices: bit k of i sets target k
    ITYPE basis[16];
    for (int i = 0; i < 16; ++i) {
        ITYPE b = basis0;
        for (int k = 0; k < 4; ++k)
            if ((i >> k) & 1) b += offs[k];
        basis[i] = b;
    }
    // dense 16x16 matrix-vector product
    GTYPE out[16];
    for (int row = 0; row < 16; ++row) {
        GTYPE sum = make_cuDoubleComplex(0.0, 0.0);
        for (int col = 0; col < 16; ++col) {
            sum = cuCadd(sum,
                cuCmul(matrix_const_gpu[row * 16 + col], state_gpu[basis[col]]));
        }
        out[row] = sum;
    }
    for (int row = 0; row < 16; ++row) state_gpu[basis[row]] = out[row];
}
// Host wrapper for the 4-qubit (16x16) dense matrix gate. Stages the matrix
// in constant memory and launches the register-based kernel; a shared-memory
// variant (quad_qubit_dense_matrix_gate_shared_gpu) exists but is unused.
__host__ void quad_qubit_dense_matrix_gate_host(
    const unsigned int target_qubit_index[4], const CPPCTYPE matrix[256],
    void* state, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE) * 256,
            0, cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    const ITYPE loop_dim = dim >> 4;
    const unsigned int block = loop_dim <= 512 ? loop_dim : 512;
    const unsigned int grid = loop_dim / block;
    // zero-insertion inside the kernel needs the targets in ascending order
    unsigned int sorted_targets[4];
    memcpy(sorted_targets, target_qubit_index, sizeof(unsigned int) * 4);
    std::sort(sorted_targets, sorted_targets + 4);
    quad_qubit_dense_matrix_gate_gpu<<<grid, block, 0, *cuda_stream>>>(
        target_qubit_index[0], target_qubit_index[1], target_qubit_index[2],
        target_qubit_index[3], sorted_targets[0], sorted_targets[1],
        sorted_targets[2], sorted_targets[3], state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// 3-qubit (8x8) dense matrix gate: one thread processes the 8 amplitudes of
// a group. targetN_qubit_index are in caller order (bit k of the local index
// maps to target k); small < mid < large are the same indices sorted, used
// only for zero-insertion.
__global__ void triple_qubit_dense_matrix_gate_gpu(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    unsigned int target2_qubit_index, unsigned int small, unsigned int mid,
    unsigned int large, GTYPE* state_gpu, ITYPE dim) {
    const ITYPE loop_dim = dim >> 3;
    const ITYPE group = blockIdx.x * blockDim.x + threadIdx.x;
    if (group >= loop_dim) return;
    // rebuild the base index by inserting zeros, ascending position order
    ITYPE basis0 = group;
    basis0 = insert_zero_to_basis_index_device(basis0, small);
    basis0 = insert_zero_to_basis_index_device(basis0, mid);
    basis0 = insert_zero_to_basis_index_device(basis0, large);
    const ITYPE offs[3] = {1ULL << target0_qubit_index,
        1ULL << target1_qubit_index, 1ULL << target2_qubit_index};
    // enumerate the 8 basis indices: bit k of i sets target k
    ITYPE basis[8];
    for (int i = 0; i < 8; ++i) {
        ITYPE b = basis0;
        for (int k = 0; k < 3; ++k)
            if ((i >> k) & 1) b += offs[k];
        basis[i] = b;
    }
    // dense 8x8 matrix-vector product
    GTYPE out[8];
    for (int row = 0; row < 8; ++row) {
        GTYPE sum = make_cuDoubleComplex(0.0, 0.0);
        for (int col = 0; col < 8; ++col) {
            sum = cuCadd(sum,
                cuCmul(matrix_const_gpu[row * 8 + col], state_gpu[basis[col]]));
        }
        out[row] = sum;
    }
    for (int row = 0; row < 8; ++row) state_gpu[basis[row]] = out[row];
}
// Host wrapper for the 3-qubit (8x8) dense matrix gate.
__host__ void triple_qubit_dense_matrix_gate_host(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    unsigned int target2_qubit_index, const CPPCTYPE matrix[64], void* state,
    ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE) * 64, 0,
            cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    const ITYPE loop_dim = dim >> 3;
    const unsigned int block = loop_dim <= 512 ? loop_dim : 512;
    const unsigned int grid = loop_dim / block;
    // zero-insertion inside the kernel needs the targets in ascending order
    unsigned int sorted_targets[3] = {
        target0_qubit_index, target1_qubit_index, target2_qubit_index};
    std::sort(sorted_targets, sorted_targets + 3);
    triple_qubit_dense_matrix_gate_gpu<<<grid, block, 0, *cuda_stream>>>(
        target0_qubit_index, target1_qubit_index, target2_qubit_index,
        sorted_targets[0], sorted_targets[1], sorted_targets[2], state_gpu,
        dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// 2-qubit (4x4) dense matrix gate: one thread processes the 4 amplitudes of
// a group. target0/target1 are in caller order (bit 0 of the local index
// maps to target0); small/large are the same indices sorted by the host.
__global__ void double_qubit_dense_matrix_gate_gpu(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    unsigned int small, unsigned int large, GTYPE* state_gpu, ITYPE dim) {
    // unsigned int left, right;
    ITYPE head, body, tail, basis0;
    ITYPE basis[4];
    GTYPE d_buffer[4];
    ITYPE quad_dim = dim >> 2;
    ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
    int x, y;
    /*
    if (target1_qubit_index > target2_qubit_index){
        left = target1_qubit_index;
        right = target2_qubit_index;
    }
    else {
        left = target2_qubit_index;
        right = target1_qubit_index;
    }
    */
    // target1 qubit index > target2 qubit index
    if (j < quad_dim) {
        // Split j into three fields and re-insert a zero bit at positions
        // `small` and `large` (manual equivalent of two zero-insertions):
        head = j >> (large - 1);
        body =
            (j & ((1ULL << (large - 1)) - 1)) >> small;  // (j % 2^(k-1)) >> i
        tail = j & ((1ULL << small) - 1);                // j%(2^i)
        basis0 = (head << (large + 1)) + (body << (small + 1)) + tail;
        // the four basis indices of the group: bit 0 -> target0, bit 1 -> target1
        basis[0] = basis0;
        basis[1] = basis0 + (1ULL << target0_qubit_index);
        basis[2] = basis0 + (1ULL << target1_qubit_index);
        basis[3] = basis0 + (1ULL << target1_qubit_index) +
                   (1ULL << target0_qubit_index);
        // dense 4x4 matrix-vector product from constant memory
        for (y = 0; y < 4; ++y) {
            d_buffer[y] = make_cuDoubleComplex(0.0, 0.0);
            for (x = 0; x < 4; ++x) {
                d_buffer[y] = cuCadd(d_buffer[y],
                    cuCmul(matrix_const_gpu[y * 4 + x], state_gpu[basis[x]]));
            }
        }
        for (y = 0; y < 4; ++y) state_gpu[basis[y]] = d_buffer[y];
    }
}
// Host wrapper for the 2-qubit (4x4) dense matrix gate.
__host__ void double_qubit_dense_matrix_gate_host(
    unsigned int target0_qubit_index, unsigned int target1_qubit_index,
    const CPPCTYPE matrix[16], void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE) * 16, 0,
            cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    const ITYPE quad_dim = dim >> 2;
    const unsigned int block = quad_dim <= 1024 ? quad_dim : 1024;
    const unsigned int grid = quad_dim / block;
    // smaller/larger target index, needed for the kernel's zero-insertion
    const unsigned int small =
        std::min(target0_qubit_index, target1_qubit_index);
    const unsigned int large =
        std::max(target0_qubit_index, target1_qubit_index);
    double_qubit_dense_matrix_gate_gpu<<<grid, block, 0, *cuda_stream>>>(
        target0_qubit_index, target1_qubit_index, small, large, state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// multi_qubit_PauliZ_gate
// Z-type multi-qubit Pauli: flips the sign of every amplitude whose basis
// index has odd parity on the bits selected by phase_flip_mask.
__device__ void multi_qubit_Pauli_gate_Z_mask_device(
    ITYPE phase_flip_mask, GTYPE* state_gpu, ITYPE dim) {
    const ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dim) return;
    // parity of the masked bits decides whether the sign flips
    if (__popcll(idx & phase_flip_mask) & 1) {
        const GTYPE v = state_gpu[idx];
        state_gpu[idx] =
            make_cuDoubleComplex(-1 * cuCreal(v), -1 * cuCimag(v));
    }
}
// Kernel entry point: thin wrapper forwarding to the __device__
// implementation so it can also be reused from other device code.
__global__ void multi_qubit_Pauli_gate_Z_mask_gpu(
    ITYPE phase_flip_mask, GTYPE* state_gpu, ITYPE dim) {
    multi_qubit_Pauli_gate_Z_mask_device(phase_flip_mask, state_gpu, dim);
}
// Host wrapper: applies a Z-type multi-qubit Pauli (sign flips only).
__host__ void multi_qubit_Pauli_gate_Z_mask_host(ITYPE phase_flip_mask,
    void* state, ITYPE dim, void* stream, unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    const unsigned int block = dim <= 1024 ? dim : 1024;
    const unsigned int grid = dim / block;
    multi_qubit_Pauli_gate_Z_mask_gpu<<<grid, block, 0, *cuda_stream>>>(
        phase_flip_mask, state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// General multi-qubit Pauli with X/Y components. Each thread handles one
// pair of basis states connected by bit_flip_mask; pivot_qubit_index is a
// flipped qubit used to enumerate each pair exactly once.
// PHASE_M90ROT[k] = (-i)^k encodes the accumulated Y-phase
// (global_phase_90rot_count) and the Z-parity sign (sign * 2 == factor -1).
__device__ void multi_qubit_Pauli_gate_XZ_mask_device(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, GTYPE* state_gpu, ITYPE dim) {
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    // loop variables
    const ITYPE loop_dim = dim >> 1;
    GTYPE PHASE_M90ROT[4] = {make_cuDoubleComplex(1.0, 0.0),
        make_cuDoubleComplex(0.0, -1), make_cuDoubleComplex(-1, 0.0),
        make_cuDoubleComplex(0.0, 1)};
    if (state_index < loop_dim) {
        // create base index with the pivot qubit forced to zero
        ITYPE basis_0 =
            insert_zero_to_basis_index_device(state_index, pivot_qubit_index);
        // partner index reached by flipping the X/Y qubits
        ITYPE basis_1 = basis_0 ^ bit_flip_mask;
        // Z-parity of each index determines an extra factor of -1
        unsigned int sign_0 = __popcll(basis_0 & phase_flip_mask) & 1;
        unsigned int sign_1 = __popcll(basis_1 & phase_flip_mask) & 1;
        // fetch values
        GTYPE cval_0 = state_gpu[basis_0];
        GTYPE cval_1 = state_gpu[basis_1];
        // swap the pair, applying the phase factor to each side
        state_gpu[basis_0] = cuCmul(cval_1,
            PHASE_M90ROT[(global_phase_90rot_count + sign_0 * 2) &
                         3]);  // a % 4 = a & (4-1)
        state_gpu[basis_1] = cuCmul(cval_0,
            PHASE_M90ROT[(global_phase_90rot_count + sign_1 * 2) &
                         3]);  // a % 4 = a & (4-1)
    }
}
// Kernel entry point: thin wrapper forwarding to the __device__
// implementation so it can also be reused from other device code.
__global__ void multi_qubit_Pauli_gate_XZ_mask_gpu(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, GTYPE* state_gpu, ITYPE dim) {
    multi_qubit_Pauli_gate_XZ_mask_device(bit_flip_mask, phase_flip_mask,
        global_phase_90rot_count, pivot_qubit_index, state_gpu, dim);
}
// Host wrapper: applies a general multi-qubit Pauli (with X/Y components)
// described by precomputed bit-flip / phase-flip masks.
__host__ void multi_qubit_Pauli_gate_XZ_mask_host(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    const unsigned int block = dim <= 1024 ? dim : 1024;
    const unsigned int grid = dim / block;
    multi_qubit_Pauli_gate_XZ_mask_gpu<<<grid, block, 0, *cuda_stream>>>(
        bit_flip_mask, phase_flip_mask, global_phase_90rot_count,
        pivot_qubit_index, state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// Rotation exp(i*angle/2 * P) for a Pauli string P with X/Y components.
// Each thread updates one pair of basis states connected by bit_flip_mask:
//   new_0 = cos(a/2)*v0 + i*sin(a/2)*v1*(-i)^(rot+2*parity0)
//   new_1 = cos(a/2)*v1 + i*sin(a/2)*v0*(-i)^(rot+2*parity1)
// where rot = global_phase_90rot_count and parity is the Z-parity.
__device__ void multi_qubit_Pauli_rotation_gate_XZ_mask_device(
    ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, double angle, GTYPE* state_gpu, ITYPE dim) {
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    // loop variables
    ITYPE loop_dim = dim >> 1;
    // coefs
    double cosval = cos(angle / 2);
    double sinval = sin(angle / 2);
    // GTYPE PHASE_90ROT[4] = {make_cuDoubleComplex(1.0,0.0),
    // make_cuDoubleComplex(0.0,1.0), make_cuDoubleComplex(-1.0,0.0),
    // make_cuDoubleComplex(0.0,-1.0)};
    // PHASE_M90ROT[k] = (-i)^k
    GTYPE PHASE_M90ROT[4] = {make_cuDoubleComplex(1.0, 0.0),
        make_cuDoubleComplex(0.0, -1), make_cuDoubleComplex(-1, 0.0),
        make_cuDoubleComplex(0.0, 1)};
    if (state_index < loop_dim) {
        // create base index with the pivot qubit forced to zero
        ITYPE basis_0 =
            insert_zero_to_basis_index_device(state_index, pivot_qubit_index);
        // partner index reached by flipping the X/Y qubits
        ITYPE basis_1 = basis_0 ^ bit_flip_mask;
        // Z-parity of each index selects an extra factor of -1
        unsigned int bit_parity_0 = __popcll(basis_0 & phase_flip_mask) & 1;
        unsigned int bit_parity_1 = __popcll(basis_1 & phase_flip_mask) & 1;
        // fetch values
        GTYPE cval_0 = state_gpu[basis_0];
        GTYPE cval_1 = state_gpu[basis_1];
        // set values
        GTYPE tmp = cuCmul(make_cuDoubleComplex(sinval * cuCreal(cval_1),
                               sinval * cuCimag(cval_1)),
            PHASE_M90ROT[(global_phase_90rot_count + bit_parity_0 * 2) & 3]);
        // state[basis_0] = cuCmul(cosval, cval_0) + 1.i * sinval * cval_1 *
        // PHASE_M90ROT[ (global_phase_90rot_count + bit_parity_0*2)&3 ]; // % 4
        state_gpu[basis_0] =
            cuCadd(make_cuDoubleComplex(
                       cosval * cuCreal(cval_0), cosval * cuCimag(cval_0)),
                cuCmul(tmp, make_cuDoubleComplex(0.0, 1.0)));
        // state[basis_1] = cosval * cval_1 + 1.i * sinval * cval_0 *
        // PHASE_M90ROT[ (global_phase_90rot_count + bit_parity_1*2)&3 ]; // % 4
        tmp = cuCmul(make_cuDoubleComplex(
                         sinval * cuCreal(cval_0), sinval * cuCimag(cval_0)),
            PHASE_M90ROT[(global_phase_90rot_count + bit_parity_1 * 2) & 3]);
        state_gpu[basis_1] =
            cuCadd(make_cuDoubleComplex(
                       cosval * cuCreal(cval_1), cosval * cuCimag(cval_1)),
                cuCmul(tmp, make_cuDoubleComplex(0.0, 1.0)));  // % 4
    }
}
// Kernel entry point: thin wrapper forwarding to the __device__
// implementation so it can also be reused from other device code.
__global__ void multi_qubit_Pauli_rotation_gate_XZ_mask_gpu(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, double angle, GTYPE* state_gpu, ITYPE dim) {
    multi_qubit_Pauli_rotation_gate_XZ_mask_device(bit_flip_mask,
        phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle,
        state_gpu, dim);
}
// Host wrapper: applies a multi-qubit Pauli rotation with X/Y components.
__host__ void multi_qubit_Pauli_rotation_gate_XZ_mask_host(ITYPE bit_flip_mask,
    ITYPE phase_flip_mask, UINT global_phase_90rot_count,
    UINT pivot_qubit_index, double angle, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    const unsigned int block = dim <= 1024 ? dim : 1024;
    const unsigned int grid = dim / block;
    multi_qubit_Pauli_rotation_gate_XZ_mask_gpu<<<grid, block, 0,
        *cuda_stream>>>(bit_flip_mask, phase_flip_mask,
        global_phase_90rot_count, pivot_qubit_index, angle, state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// Rotation exp(i*angle/2 * P) for a Z-type Pauli string P: each amplitude
// picks up the phase cos(a/2) +/- i*sin(a/2) depending on the parity of
// its basis index on the bits selected by phase_flip_mask.
__device__ void multi_qubit_Pauli_rotation_gate_Z_mask_device(
    ITYPE phase_flip_mask, double angle, GTYPE* state_gpu, ITYPE dim) {
    const ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dim) return;
    const double cosval = cos(angle / 2);
    const double sinval = sin(angle / 2);
    // odd parity -> negative imaginary part
    const UINT parity = __popcll(idx & phase_flip_mask) & 1;
    const double signed_sin = parity ? -sinval : sinval;
    state_gpu[idx] =
        cuCmul(state_gpu[idx], make_cuDoubleComplex(cosval, signed_sin));
}
// Kernel entry point: thin wrapper forwarding to the __device__
// implementation so it can also be reused from other device code.
__global__ void multi_qubit_Pauli_rotation_gate_Z_mask_gpu(
    ITYPE phase_flip_mask, double angle, GTYPE* state_gpu, ITYPE dim) {
    multi_qubit_Pauli_rotation_gate_Z_mask_device(
        phase_flip_mask, angle, state_gpu, dim);
}
// Host wrapper: applies a Z-type multi-qubit Pauli rotation.
__host__ void multi_qubit_Pauli_rotation_gate_Z_mask_host(ITYPE phase_flip_mask,
    double angle, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    const unsigned int block = dim <= 1024 ? dim : 1024;
    const unsigned int grid = dim / block;
    multi_qubit_Pauli_rotation_gate_Z_mask_gpu<<<grid, block, 0,
        *cuda_stream>>>(phase_flip_mask, angle, state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    state = reinterpret_cast<void*>(state_gpu);
}
// Applies a multi-qubit Pauli gate specified by per-target operator IDs.
// The (target, operator) lists are first folded into bit masks, then the
// work is dispatched to the Z-only or general XZ implementation.
__host__ void multi_qubit_Pauli_gate_partial_list_host(
    const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list,
    UINT target_qubit_index_count, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    ITYPE bit_flip_mask = 0, phase_flip_mask = 0;
    UINT global_phase_90rot_count = 0, pivot_qubit_index = 0;
    get_Pauli_masks_partial_list_gsim(target_qubit_index_list,
        Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask,
        &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
    if (bit_flip_mask == 0) {
        // No X/Y factors: the gate is diagonal, use the cheaper Z-mask path.
        multi_qubit_Pauli_gate_Z_mask_host(
            phase_flip_mask, state, dim, stream, device_number);
        return;
    }
    multi_qubit_Pauli_gate_XZ_mask_host(bit_flip_mask, phase_flip_mask,
        global_phase_90rot_count, pivot_qubit_index, state, dim, stream,
        device_number);
}
// Applies a Pauli gate described by one operator ID per qubit of the
// register. Masks are built from the whole list, then the call is routed
// to the diagonal (Z-only) or general (XZ) implementation.
__host__ void multi_qubit_Pauli_gate_whole_list_host(
    const UINT* Pauli_operator_type_list, UINT qubit_count, void* state,
    ITYPE dim, void* stream, unsigned int device_number) {
    // Select the requested GPU before any work is queued.
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    ITYPE bit_flip_mask = 0, phase_flip_mask = 0;
    UINT global_phase_90rot_count = 0, pivot_qubit_index = 0;
    get_Pauli_masks_whole_list_gsim(Pauli_operator_type_list, qubit_count,
        &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count,
        &pivot_qubit_index);
    if (bit_flip_mask != 0) {
        // General case: the Pauli string contains X and/or Y factors.
        multi_qubit_Pauli_gate_XZ_mask_host(bit_flip_mask, phase_flip_mask,
            global_phase_90rot_count, pivot_qubit_index, state, dim, stream,
            device_number);
    } else {
        // Z/I only: diagonal in the computational basis.
        multi_qubit_Pauli_gate_Z_mask_host(
            phase_flip_mask, state, dim, stream, device_number);
    }
}
// Applies exp-style multi-qubit Pauli rotation given per-target operator IDs.
// Builds the Pauli masks, then dispatches to the diagonal or general path.
__host__ void multi_qubit_Pauli_rotation_gate_partial_list_host(
    const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list,
    UINT target_qubit_index_count, double angle, void* state, ITYPE dim,
    void* stream, unsigned int device_number) {
    ITYPE bit_flip_mask = 0, phase_flip_mask = 0;
    UINT global_phase_90rot_count = 0, pivot_qubit_index = 0;
    get_Pauli_masks_partial_list_gsim(target_qubit_index_list,
        Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask,
        &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
    if (bit_flip_mask != 0) {
        // General case: the Pauli string contains X and/or Y factors.
        multi_qubit_Pauli_rotation_gate_XZ_mask_host(bit_flip_mask,
            phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle,
            state, dim, stream, device_number);
    } else {
        // Z/I only: the rotation is diagonal in the computational basis.
        multi_qubit_Pauli_rotation_gate_Z_mask_host(
            phase_flip_mask, angle, state, dim, stream, device_number);
    }
}
// Applies a Pauli rotation described by one operator ID per qubit of the
// register. Masks are folded from the whole list, then dispatched.
__host__ void multi_qubit_Pauli_rotation_gate_whole_list_host(
    const UINT* Pauli_operator_type_list, UINT qubit_count, double angle,
    void* state, ITYPE dim, void* stream, unsigned int device_number) {
    ITYPE bit_flip_mask = 0, phase_flip_mask = 0;
    UINT global_phase_90rot_count = 0, pivot_qubit_index = 0;
    get_Pauli_masks_whole_list_gsim(Pauli_operator_type_list, qubit_count,
        &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count,
        &pivot_qubit_index);
    if (bit_flip_mask != 0) {
        // General case: X and/or Y factors present.
        multi_qubit_Pauli_rotation_gate_XZ_mask_host(bit_flip_mask,
            phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle,
            state, dim, stream, device_number);
    } else {
        // Diagonal rotation (Z/I only).
        multi_qubit_Pauli_rotation_gate_Z_mask_host(
            phase_flip_mask, angle, state, dim, stream, device_number);
    }
}
// target_qubit_count <= 5
// Dense matrix gate using the matrix staged in constant memory
// (matrix_const_gpu). Launch layout (set by the host wrapper): threadIdx.x
// enumerates 2^k-amplitude sub-vectors, threadIdx.y enumerates the 2^k
// matrix rows, with blockDim.x * blockDim.y <= 1024 so a block's working
// set fits the 1024-entry shared buffer.
__global__ void multi_qubit_dense_matrix_gate_shared_gpu(
    UINT target_qubit_index_count, GTYPE* state_gpu, ITYPE dim) {
    __shared__ GTYPE state_basis[1024];
    GTYPE tmp = make_cuDoubleComplex(0.0, 0.0);
    ITYPE loop_dim = dim >> target_qubit_index_count;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    int j;
    ITYPE mat_len = 1ULL << target_qubit_index_count;
    if (basis < loop_dim) {
        // Re-insert zeros at the (sorted) target positions to form the base
        // index of this thread's sub-vector.
        for (j = 0; j < target_qubit_index_count; ++j)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[j]);
        // threadIdx.y picks which of the 2^k amplitudes this thread loads.
        for (j = 0; j < target_qubit_index_count; ++j)
            basis +=
                (1ULL << target_index_list_gpu[j]) * ((threadIdx.y >> j) & 1);
        state_basis[(threadIdx.x << target_qubit_index_count) + threadIdx.y] =
            state_gpu[basis];
        // NOTE(review): barrier inside a divergent branch — safe only when
        // every thread of the block passes `basis < loop_dim`; the host
        // launch configuration appears to guarantee this. Confirm.
        __syncthreads();
        // tmp = (matrix row threadIdx.y) dot (this sub-vector in shared).
        for (j = 0; j < mat_len; ++j)
            tmp = cuCadd(tmp,
                cuCmul(
                    matrix_const_gpu[(threadIdx.y << target_qubit_index_count) +
                                     j],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        state_gpu[basis] = tmp;
    }
}
// target_qubit_count <= 10
// Same algorithm as the constant-memory variant, but the matrix is read
// from global memory (matrix_gpu) because it no longer fits the
// constant-memory symbol.
__global__ void multi_qubit_dense_matrix_gate_shared_gpu(
    UINT target_qubit_index_count, GTYPE* matrix_gpu, GTYPE* state_gpu,
    ITYPE dim) {
    __shared__ GTYPE state_basis[1024];
    GTYPE tmp = make_cuDoubleComplex(0.0, 0.0);
    ITYPE loop_dim = dim >> target_qubit_index_count;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    int j;
    ITYPE mat_len = 1ULL << target_qubit_index_count;
    if (basis < loop_dim) {
        // Re-insert zeros at the sorted target positions (base index).
        for (j = 0; j < target_qubit_index_count; ++j)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[j]);
        // threadIdx.y selects which of the 2^k amplitudes this thread loads.
        for (j = 0; j < target_qubit_index_count; ++j)
            basis +=
                (1ULL << target_index_list_gpu[j]) * ((threadIdx.y >> j) & 1);
        state_basis[(threadIdx.x << target_qubit_index_count) + threadIdx.y] =
            state_gpu[basis];
        // NOTE(review): barrier inside a divergent branch — relies on the
        // launch configuration making the guard uniform per block. Confirm.
        __syncthreads();
        // tmp = (matrix row threadIdx.y) dot (this sub-vector in shared).
        for (j = 0; j < mat_len; ++j)
            tmp = cuCadd(tmp,
                cuCmul(
                    matrix_gpu[(threadIdx.y << target_qubit_index_count) + j],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        state_gpu[basis] = tmp;
    }
}
// there is no atomicAdd: each thread owns a pair of output amplitudes
// (differing in the lowest sorted target bit), so results are written back
// without atomics.
// target_qubit_index_count<=11
__global__ void multi_qubit_dense_matrix_gate_half_shared_gpu(
    UINT target_qubit_index_count, GTYPE* matrix_gpu, GTYPE* state_gpu,
    ITYPE dim) {
    __shared__ GTYPE state_basis[2048];
    ITYPE loop_dim = dim >> target_qubit_index_count;
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    ITYPE basis0, basis1;
    ITYPE matrix_len = 1ULL << target_qubit_index_count;
    // ITYPE half_matrix_len = 1ULL << (target_qubit_index_count-1);
    if (basis < loop_dim) {
        // Re-insert zeros at all sorted target positions (base index).
        for (int j = 0; j < target_qubit_index_count; ++j)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[j]);
        // threadIdx.y fixes the upper k-1 target bits; the remaining bit
        // distinguishes the two amplitudes handled by this thread.
        for (int j = 0; j < target_qubit_index_count - 1; ++j)
            basis += (1ULL << target_index_list_gpu[j + 1]) *
                     ((threadIdx.y >> j) & 1);
        basis0 = basis;
        basis1 = basis0 ^ (1ULL << sorted_insert_index_list_gpu[0]);
        state_basis[(threadIdx.x << target_qubit_index_count) +
                    (threadIdx.y << 1)] = state_gpu[basis0];
        state_basis[(threadIdx.x << target_qubit_index_count) +
                    (threadIdx.y << 1) + 1] = state_gpu[basis1];
        // NOTE(review): barrier inside a divergent branch — relies on the
        // launch configuration making the guard uniform per block. Confirm.
        __syncthreads();
        // Row (2*threadIdx.y) of the matrix times the shared sub-vector.
        GTYPE d_buff = make_cuDoubleComplex(0.0, 0.0);
        for (int j = 0; j < matrix_len; ++j)
            d_buff = cuCadd(d_buff,
                cuCmul(matrix_gpu[((threadIdx.y << 1)
                                      << target_qubit_index_count) +
                                  j],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        state_gpu[basis0] = d_buff;
        // Row (2*threadIdx.y + 1) for the paired output amplitude.
        d_buff = make_cuDoubleComplex(0.0, 0.0);
        for (int j = 0; j < matrix_len; ++j)
            d_buff = cuCadd(d_buff,
                cuCmul(matrix_gpu[(((threadIdx.y << 1) + 1)
                                      << target_qubit_index_count) +
                                  j],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        state_gpu[basis1] = d_buff;
        // printf("basis0: %d, basis1: %d\n", (int)basis0, (int)basis1);
    }
}
// General fallback used for target_qubit_index_count > 11: the 2^k x 2^k
// matrix-vector product is split into 1024-wide tiles. Each tile's partial
// dot products are accumulated into state_gpu with double-precision atomic
// adds; state_gpu_copy carries the (read-only) input amplitudes, and the
// host wrapper zeroes state_gpu before launch.
__global__ void multi_qubit_dense_matrix_gate_gpu(UINT target_qubit_index_count,
    GTYPE* matrix_gpu, GTYPE* state_gpu, GTYPE* state_gpu_copy, ITYPE dim) {
    __shared__ GTYPE state_basis[1024];
    ITYPE loop_dim = dim >> target_qubit_index_count;
    ITYPE large_block_index = 0;
    ITYPE large_block_residual = 0;
    ITYPE block_loop_dim = 1;  // target_qubit_index_count-3;
    ITYPE block_index = 0;
    ITYPE block_residual =
        0;  // block_loop_dim<=1 ? 0 : blockIdx.x % (1ULL<<block_loop_dim);
    ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x;
    ITYPE assign_basis;
    ITYPE basis0;
    // For k >= 11, blockIdx.x encodes (sub-vector, output tile, input tile).
    if (target_qubit_index_count >= 10 + 1) {
        block_loop_dim = 1ULL << (target_qubit_index_count - 10);
        large_block_index = blockIdx.x / (block_loop_dim * block_loop_dim);
        large_block_residual = blockIdx.x % (block_loop_dim * block_loop_dim);
        block_index = large_block_residual / block_loop_dim;
        block_residual = blockIdx.x % block_loop_dim;
        basis = large_block_index * blockDim.x + threadIdx.x;
    }
    ITYPE matrix_len = 1ULL << target_qubit_index_count;
    if (basis < loop_dim) {
        // Load this block's 1024-entry slice of the input sub-vector.
        ITYPE tmp = (block_residual << 10) + threadIdx.y;
        for (int j = 0; j < target_qubit_index_count; ++j)
            basis = insert_zero_to_basis_index_device(
                basis, sorted_insert_index_list_gpu[j]);
        basis0 = basis;
        for (int j = 0; j < target_qubit_index_count; ++j)
            basis += (1ULL << target_index_list_gpu[j]) * ((tmp >> j) & 1);
        state_basis[(threadIdx.x << target_qubit_index_count) + threadIdx.y] =
            state_gpu_copy[basis];
        // Output amplitude this thread accumulates into (output tile index).
        if (target_qubit_index_count >= 10 + 1) {
            tmp = (block_index << 10) + threadIdx.y;
            assign_basis = basis0;
            for (int j = 0; j < target_qubit_index_count; ++j)
                assign_basis +=
                    (1ULL << target_index_list_gpu[j]) * ((tmp >> j) & 1);
        } else {
            assign_basis = basis;
        }
        // NOTE(review): barrier inside a divergent branch — relies on the
        // launch configuration making the guard uniform per block. Confirm.
        __syncthreads();
        // Partial dot product of one matrix row over this 1024-wide tile.
        GTYPE d_buff = make_cuDoubleComplex(0.0, 0.0);
        ITYPE tmp_len = block_residual << 10;
        if (matrix_len > 1024) matrix_len = 1024;
        ITYPE row_index = (block_index << 10) + threadIdx.y;
        for (ITYPE j = 0; j < matrix_len; ++j)
            d_buff = cuCadd(d_buff,
                cuCmul(matrix_gpu[(row_index << target_qubit_index_count) + j +
                                  tmp_len],
                    state_basis[(threadIdx.x << target_qubit_index_count) +
                                j]));
        // Accumulate partials across input tiles with atomics.
        atomicAdd_double_duplicate(&(state_gpu[assign_basis].x), d_buff.x);
        atomicAdd_double_duplicate(&(state_gpu[assign_basis].y), d_buff.y);
    }
}
// Host wrapper for dense gates on up to 10 target qubits. Index lists are
// staged in constant memory; the matrix goes to constant memory when
// k <= 5 and to a temporary global buffer when 6 <= k <= 10.
__host__ void multi_qubit_dense_matrix_gate_small_qubit_host(
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    cudaError cudaStatus;
    // matrix dim, mask, buffer
    ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    // insert index
    UINT* h_sorted_insert_index_list = create_sorted_ui_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // loop variables
    ITYPE loop_dim = dim >> target_qubit_index_count;
    // NOTE(review): matrix_gpu is only assigned in the 6..10 branch but is
    // freed whenever count > 5; a call with count > 10 would free an
    // uninitialized pointer (callers currently route such sizes elsewhere).
    GTYPE* matrix_gpu;
    // Launch layout: block.y spans the 2^k matrix rows, block.x packs as
    // many sub-vectors as fit in 1024 threads.
    unsigned int max_block_size = 1024 / matrix_dim;
    dim3 block;
    block.y = matrix_dim;
    block.x = loop_dim <= max_block_size ? loop_dim : max_block_size;
    unsigned int grid = loop_dim / block.x;
    if (target_qubit_index_count <= 5) {
        // Small matrix: stage matrix and index lists in constant memory.
        checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix,
                            sizeof(GTYPE) * matrix_dim * matrix_dim, 0,
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        checkCudaErrors(cudaMemcpyToSymbolAsync(target_index_list_gpu,
                            target_qubit_index_list,
                            sizeof(UINT) * target_qubit_index_count, 0,
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
                            h_sorted_insert_index_list,
                            sizeof(UINT) * target_qubit_index_count, 0,
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        multi_qubit_dense_matrix_gate_shared_gpu<<<grid, block, 0,
            *cuda_stream>>>(target_qubit_index_count, state_gpu, dim);
    } else if (target_qubit_index_count <= 10) {
        // Larger matrix: stage the matrix in global memory instead.
        checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&matrix_gpu),
                            matrix_dim * matrix_dim * sizeof(GTYPE)),
            __FILE__, __LINE__);
        checkCudaErrors(cudaMemcpyAsync(matrix_gpu, matrix,
                            matrix_dim * matrix_dim * sizeof(GTYPE),
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        checkCudaErrors(cudaMemcpyToSymbolAsync(target_index_list_gpu,
                            target_qubit_index_list,
                            sizeof(UINT) * target_qubit_index_count, 0,
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
                            h_sorted_insert_index_list,
                            sizeof(UINT) * target_qubit_index_count, 0,
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        multi_qubit_dense_matrix_gate_shared_gpu<<<grid, block, 0,
            *cuda_stream>>>(
            target_qubit_index_count, matrix_gpu, state_gpu, dim);
    }
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    checkCudaErrors(cudaStatus, __FILE__, __LINE__);
    if (target_qubit_index_count > 5) cudaFree(matrix_gpu);
    free((UINT*)h_sorted_insert_index_list);
    state = reinterpret_cast<void*>(state_gpu);
}
// Host wrapper specialized for 11 target qubits: uses the "half shared"
// kernel where each thread computes two output amplitudes, so a 2^11
// sub-vector fits the 2048-entry shared buffer with <= 1024 threads.
__host__ void multi_qubit_dense_matrix_gate_11qubit_host(
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    // cudaError cudaStatus;
    // matrix dim, mask, buffer
    ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    UINT* h_sorted_insert_index_list = create_sorted_ui_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // loop variables
    // ITYPE loop_dim = dim >> target_qubit_index_count;
    GTYPE* matrix_gpu;
    // block.y spans half the matrix rows (each thread handles a row pair).
    dim3 block;
    block.y = (matrix_dim >> 1) <= 1024 ? (matrix_dim >> 1) : 1024;
    unsigned int max_block_size = 1024 / block.y;
    block.x = dim / block.y <= max_block_size ? dim / block.y : max_block_size;
    unsigned int grid = dim / block.x / block.y;
    // Matrix is too large for constant memory: stage in global memory.
    checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&matrix_gpu),
                        matrix_dim * matrix_dim * sizeof(GTYPE)),
        __FILE__, __LINE__);
    checkCudaErrors(cudaMemcpyAsync(matrix_gpu, matrix,
                        matrix_dim * matrix_dim * sizeof(GTYPE),
                        cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index_list,
            sizeof(UINT) * target_qubit_index_count, 0, cudaMemcpyHostToDevice,
            *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
            h_sorted_insert_index_list, sizeof(UINT) * target_qubit_index_count,
            0, cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    multi_qubit_dense_matrix_gate_half_shared_gpu<<<grid, block, 0,
        *cuda_stream>>>(target_qubit_index_count, matrix_gpu, state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    checkCudaErrors(cudaFree(matrix_gpu), __FILE__, __LINE__);
    free((UINT*)h_sorted_insert_index_list);
    state = reinterpret_cast<void*>(state_gpu);
}
// Host wrapper for more than 11 target qubits. The input state is duplicated
// into state_gpu_copy, the destination state is zeroed, and the tiled kernel
// accumulates partial matrix-vector products into it with atomic adds.
__host__ void multi_qubit_dense_matrix_gate_more_than_11qubit_host(
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    UINT device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    // cudaError cudaStatus;
    // matrix dim, mask, buffer
    ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    UINT* h_sorted_insert_index_list = create_sorted_ui_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // loop variables
    ITYPE loop_dim = dim >> target_qubit_index_count;
    GTYPE* matrix_gpu;
    dim3 grid, block;
    block.y = matrix_dim <= (1ULL << 10) ? matrix_dim : (1ULL << 10);
    unsigned int max_block_size = (1ULL << 10) / block.y;
    block.x = dim / block.y <= max_block_size ? dim / block.y : max_block_size;
    grid.x = dim / block.x / block.y;
    // One grid slot per (sub-vector, output tile, input tile) triple.
    if (target_qubit_index_count >= 10 + 1)
        grid.x = (1ULL << ((target_qubit_index_count - 10) << 1)) * loop_dim;
    GTYPE* state_gpu_copy;
    checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&matrix_gpu),
                        matrix_dim * matrix_dim * sizeof(GTYPE)),
        __FILE__, __LINE__);
    checkCudaErrors(cudaMemcpyAsync(matrix_gpu, matrix,
                        matrix_dim * matrix_dim * sizeof(GTYPE),
                        cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index_list,
            sizeof(UINT) * target_qubit_index_count, 0, cudaMemcpyHostToDevice,
            *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
            h_sorted_insert_index_list, sizeof(UINT) * target_qubit_index_count,
            0, cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    // Snapshot the input amplitudes, then clear the accumulation target.
    checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&state_gpu_copy),
                        dim * sizeof(GTYPE)),
        __FILE__, __LINE__);
    checkCudaErrors(
        cudaMemcpyAsync(state_gpu_copy, state_gpu, dim * sizeof(GTYPE),
            cudaMemcpyDeviceToDevice, *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        cudaMemsetAsync(state_gpu, 0, dim * sizeof(GTYPE), *cuda_stream),
        __FILE__, __LINE__);
    multi_qubit_dense_matrix_gate_gpu<<<grid, block, 0, *cuda_stream>>>(
        target_qubit_index_count, matrix_gpu, state_gpu, state_gpu_copy, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    cudaFree(state_gpu_copy);
    cudaFree(matrix_gpu);
    free((UINT*)h_sorted_insert_index_list);
    state = reinterpret_cast<void*>(state_gpu);
}
// Dispatches a dense matrix gate to the implementation specialized for the
// number of target qubits; the external contract is unchanged.
__host__ void multi_qubit_dense_matrix_gate_host(
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    switch (target_qubit_index_count) {
        case 1:
            single_qubit_dense_matrix_gate_host(target_qubit_index_list[0],
                matrix, state, dim, stream, device_number);
            return;
        case 2:
            double_qubit_dense_matrix_gate_host(target_qubit_index_list[0],
                target_qubit_index_list[1], matrix, state, dim, stream,
                device_number);
            return;
        case 3:
            triple_qubit_dense_matrix_gate_host(target_qubit_index_list[0],
                target_qubit_index_list[1], target_qubit_index_list[2], matrix,
                state, dim, stream, device_number);
            return;
        case 4: {
            // quad_* apparently requires a mutable list; pass a local copy.
            UINT targets[4];
            for (int i = 0; i < 4; ++i) targets[i] = target_qubit_index_list[i];
            quad_qubit_dense_matrix_gate_host(
                targets, matrix, state, dim, stream, device_number);
            return;
        }
        case 11:
            multi_qubit_dense_matrix_gate_11qubit_host(target_qubit_index_list,
                target_qubit_index_count, matrix, state, dim, stream,
                device_number);
            return;
        default:
            if (target_qubit_index_count >= 12) {
                multi_qubit_dense_matrix_gate_more_than_11qubit_host(
                    target_qubit_index_list, target_qubit_index_count, matrix,
                    state, dim, stream, device_number);
            } else {
                // Remaining sizes (including 5..10) use the shared-memory path.
                multi_qubit_dense_matrix_gate_small_qubit_host(
                    target_qubit_index_list, target_qubit_index_count, matrix,
                    state, dim, stream, device_number);
            }
            return;
    }
}
// target_qubit_index_count <= 5
// Single-control dense gate, matrix read from constant memory
// (matrix_const_gpu). Each thread owns one 2^k-amplitude sub-vector: it
// builds the base index by re-inserting zeros at the sorted target+control
// positions, sets the control bit, then applies the full dense matrix.
__global__ void single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(
    UINT control_qubit_index, UINT control_value, UINT target_qubit_index_count,
    GTYPE* state, ITYPE dim) {
    // control mask
    const ITYPE control_mask = (1ULL << control_qubit_index) * control_value;
    const UINT insert_index_count = target_qubit_index_count + 1;
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    // loop varaibles
    const ITYPE loop_dim = dim >> insert_index_count;
    // NOTE(review): 1024-entry per-thread buffer; this likely lives in local
    // (off-chip) memory rather than registers — confirm with the profiler.
    GTYPE d_buffer[1024];
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (state_index < loop_dim) {
        // create base index
        ITYPE basis_0 = state_index;
        for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
            UINT insert_index = sorted_insert_index_list_gpu[cursor];
            basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index);
        }
        // flip control
        basis_0 ^= control_mask;
        // compute matrix mul
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            d_buffer[y] = make_cuDoubleComplex(0.0, 0.0);
            for (ITYPE x = 0; x < matrix_dim; ++x) {
                d_buffer[y] = cuCadd(
                    d_buffer[y], cuCmul(matrix_const_gpu[y * matrix_dim + x],
                                     state[basis_0 ^ matrix_mask_list_gpu[x]]));
            }
        }
        // set result
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y];
        }
    }
}
// target_qubit_index_count <= 10
// Same single-control algorithm, but the matrix is passed in global memory;
// the mask and insert lists are still read from constant-memory symbols.
__global__ void single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(
    UINT control_qubit_index, UINT control_value, UINT target_qubit_index_count,
    const GTYPE* matrix, GTYPE* state, ITYPE dim) {
    // control mask
    const ITYPE control_mask = (1ULL << control_qubit_index) * control_value;
    const UINT insert_index_count = target_qubit_index_count + 1;
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    // loop varaibles
    const ITYPE loop_dim = dim >> insert_index_count;
    // NOTE(review): 1024-entry per-thread buffer; this likely lives in local
    // (off-chip) memory rather than registers — confirm with the profiler.
    GTYPE d_buffer[1024];
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (state_index < loop_dim) {
        // create base index
        ITYPE basis_0 = state_index;
        for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
            UINT insert_index = sorted_insert_index_list_gpu[cursor];
            basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index);
        }
        // flip control
        basis_0 ^= control_mask;
        // compute matrix mul
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            d_buffer[y] = make_cuDoubleComplex(0.0, 0.0);
            for (ITYPE x = 0; x < matrix_dim; ++x) {
                d_buffer[y] = cuCadd(
                    d_buffer[y], cuCmul(matrix[y * matrix_dim + x],
                                     state[basis_0 ^ matrix_mask_list_gpu[x]]));
            }
        }
        // set result
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y];
        }
    }
}
// Host wrapper: dense matrix gate on up to 10 target qubits, applied only
// where the control qubit equals `control_value`.
// Fix vs. previous revision: for 6..10 targets the mask list is now staged in
// the constant-memory symbol the kernel actually reads (matrix_mask_list_gpu).
// Previously it was copied into an unused global buffer that was also
// allocated with the wrong size (matrix_dim^2 * sizeof(GTYPE) for matrix_dim
// ITYPEs), leaving the kernel reading stale masks.
__host__ void single_qubit_control_multi_qubit_dense_matrix_gate_host(
    UINT control_qubit_index, UINT control_value,
    const UINT* target_qubit_index_list, UINT target_qubit_index_count,
    const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream,
    unsigned int device_number) {
    // Only kernels for k <= 10 exist; reject larger sizes before allocating.
    if (target_qubit_index_count > 10) {
        printf("The max number of targets is limited to 10.");
        assert(0);
        return;
    }
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    // matrix dim and per-target bit masks
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    ITYPE* matrix_mask_list = create_matrix_mask_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // sorted positions (targets + the control) for zero re-insertion
    const UINT insert_index_count = target_qubit_index_count + 1;
    UINT* sorted_insert_index_list = create_sorted_ui_list_value_gsim(
        target_qubit_index_list, target_qubit_index_count, control_qubit_index);
    // one thread per 2^k-amplitude sub-vector
    const ITYPE loop_dim = dim >> insert_index_count;
    // Shrink the block as k grows (per-thread buffer scales with 2^k).
    unsigned int block;
    if (target_qubit_index_count >= 3) {
        unsigned int tmp_block = 1ULL << (13 - target_qubit_index_count);
        block = loop_dim <= tmp_block ? loop_dim : tmp_block;
    } else {
        block = loop_dim <= 1024 ? loop_dim : 1024;
    }
    unsigned int grid = loop_dim / block;
    // Both kernel variants read these lists from constant memory.
    checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_mask_list_gpu,
                        matrix_mask_list, sizeof(ITYPE) * matrix_dim, 0,
                        cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
                        sorted_insert_index_list,
                        sizeof(UINT) * (target_qubit_index_count + 1), 0,
                        cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    GTYPE* d_matrix = NULL;
    if (target_qubit_index_count <= 5) {
        // Small matrix: it fits in the constant-memory symbol as well.
        checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix,
                            sizeof(GTYPE) * matrix_dim * matrix_dim, 0,
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu<<<grid,
            block, 0, *cuda_stream>>>(control_qubit_index, control_value,
            target_qubit_index_count, state_gpu, dim);
    } else {
        // Larger matrix: stage in global memory and pass it to the kernel.
        checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_matrix),
                            matrix_dim * matrix_dim * sizeof(GTYPE)),
            __FILE__, __LINE__);
        checkCudaErrors(cudaMemcpyAsync(d_matrix, matrix,
                            matrix_dim * matrix_dim * sizeof(GTYPE),
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu<<<grid,
            block, 0, *cuda_stream>>>(control_qubit_index, control_value,
            target_qubit_index_count, d_matrix, state_gpu, dim);
    }
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // Check for any errors launching the kernel
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    if (d_matrix != NULL) cudaFree(d_matrix);
    free(sorted_insert_index_list);
    free(matrix_mask_list);
    state = reinterpret_cast<void*>(state_gpu);
}
// target_qubit_index_count <= 5
// Multi-control dense gate, matrix read from constant memory
// (matrix_const_gpu). `control_mask` is precomputed on the host from the
// control indices and values; each thread owns one 2^k-amplitude sub-vector.
__global__ void multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(
    ITYPE control_mask, UINT target_qubit_index_count,
    ITYPE control_qubit_index_count, GTYPE* state, ITYPE dim) {
    // control mask
    const UINT insert_index_count =
        target_qubit_index_count + control_qubit_index_count;
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    // loop varaibles
    const ITYPE loop_dim = dim >> insert_index_count;
    // NOTE(review): 1024-entry per-thread buffer; this likely lives in local
    // (off-chip) memory rather than registers — confirm with the profiler.
    GTYPE d_buffer[1024];
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (state_index < loop_dim) {
        // create base index
        ITYPE basis_0 = state_index;
        for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
            UINT insert_index = sorted_insert_index_list_gpu[cursor];
            basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index);
        }
        // flip control
        basis_0 ^= control_mask;
        // compute matrix mul
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            d_buffer[y] = make_cuDoubleComplex(0.0, 0.0);
            for (ITYPE x = 0; x < matrix_dim; ++x) {
                d_buffer[y] = cuCadd(
                    d_buffer[y], cuCmul(matrix_const_gpu[y * matrix_dim + x],
                                     state[basis_0 ^ matrix_mask_list_gpu[x]]));
            }
        }
        // set result
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y];
        }
    }
}
// target_qubit_index_count <= 10
// Same multi-control algorithm, but the matrix is passed in global memory;
// the mask and insert lists are still read from constant-memory symbols.
__global__ void multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(
    ITYPE control_mask, UINT target_qubit_index_count,
    ITYPE control_qubit_index_count, const GTYPE* matrix, GTYPE* state,
    ITYPE dim) {
    // control mask
    const UINT insert_index_count =
        target_qubit_index_count + control_qubit_index_count;
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    // loop varaibles
    const ITYPE loop_dim = dim >> insert_index_count;
    // NOTE(review): 1024-entry per-thread buffer; this likely lives in local
    // (off-chip) memory rather than registers — confirm with the profiler.
    GTYPE d_buffer[1024];
    ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (state_index < loop_dim) {
        // create base index
        ITYPE basis_0 = state_index;
        for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
            UINT insert_index = sorted_insert_index_list_gpu[cursor];
            basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index);
        }
        // flip control
        basis_0 ^= control_mask;
        // compute matrix mul
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            d_buffer[y] = make_cuDoubleComplex(0.0, 0.0);
            for (ITYPE x = 0; x < matrix_dim; ++x) {
                d_buffer[y] = cuCadd(
                    d_buffer[y], cuCmul(matrix[y * matrix_dim + x],
                                     state[basis_0 ^ matrix_mask_list_gpu[x]]));
            }
        }
        // set result
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y];
        }
    }
}
// Host wrapper: dense matrix gate on up to 10 target qubits, applied only to
// basis states where every control qubit matches its control value.
// Fix vs. previous revision: for 6..10 targets the mask list is now staged in
// the constant-memory symbol the kernel actually reads (matrix_mask_list_gpu).
// Previously it was copied into an unused global buffer that was also
// allocated with the wrong size (matrix_dim^2 * sizeof(GTYPE) for matrix_dim
// ITYPEs), leaving the kernel reading stale masks.
__host__ void multi_qubit_control_multi_qubit_dense_matrix_gate_host(
    const UINT* control_qubit_index_list, const UINT* control_value_list,
    UINT control_qubit_index_count, const UINT* target_qubit_index_list,
    UINT target_qubit_index_count, const CPPCTYPE* matrix, void* state,
    ITYPE dim, void* stream, unsigned int device_number) {
    // Only kernels for k <= 10 exist; reject larger sizes before allocating.
    if (target_qubit_index_count > 10) {
        printf("The max number of targets is limited to 10.");
        assert(0);
        return;
    }
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice((int)device_number);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    // matrix dim and per-target bit masks
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    ITYPE* matrix_mask_list = create_matrix_mask_list_gsim(
        target_qubit_index_list, target_qubit_index_count);
    // sorted positions (targets + controls) for zero re-insertion
    UINT* sorted_insert_index_list = create_sorted_ui_list_list_gsim(
        target_qubit_index_list, target_qubit_index_count,
        control_qubit_index_list, control_qubit_index_count);
    // mask selecting basis states with matching control values
    ITYPE control_mask = create_control_mask_gsim(control_qubit_index_list,
        control_value_list, control_qubit_index_count);
    // one thread per 2^k-amplitude sub-vector
    const ITYPE loop_dim =
        dim >> (target_qubit_index_count + control_qubit_index_count);
    // Shrink the block as k grows (per-thread buffer scales with 2^k).
    unsigned int block;
    if (target_qubit_index_count >= 3) {
        unsigned int tmp_block = 1ULL << (13 - target_qubit_index_count);
        block = loop_dim <= tmp_block ? loop_dim : tmp_block;
    } else {
        block = loop_dim <= 1024 ? loop_dim : 1024;
    }
    unsigned int grid = loop_dim / block;
    // Both kernel variants read these lists from constant memory.
    checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_mask_list_gpu,
                        matrix_mask_list, sizeof(ITYPE) * matrix_dim, 0,
                        cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    checkCudaErrors(
        cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu,
            sorted_insert_index_list,
            sizeof(UINT) *
                (target_qubit_index_count + control_qubit_index_count),
            0, cudaMemcpyHostToDevice, *cuda_stream),
        __FILE__, __LINE__);
    GTYPE* d_matrix = NULL;
    if (target_qubit_index_count <= 5) {
        // Small matrix: it fits in the constant-memory symbol as well.
        checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix,
                            sizeof(GTYPE) * matrix_dim * matrix_dim, 0,
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu<<<grid,
            block, 0, *cuda_stream>>>(control_mask, target_qubit_index_count,
            control_qubit_index_count, state_gpu, dim);
    } else {
        // Larger matrix: stage in global memory and pass it to the kernel.
        checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_matrix),
                            matrix_dim * matrix_dim * sizeof(GTYPE)),
            __FILE__, __LINE__);
        checkCudaErrors(cudaMemcpyAsync(d_matrix, matrix,
                            matrix_dim * matrix_dim * sizeof(GTYPE),
                            cudaMemcpyHostToDevice, *cuda_stream),
            __FILE__, __LINE__);
        multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu<<<grid,
            block, 0, *cuda_stream>>>(control_mask, target_qubit_index_count,
            control_qubit_index_count, d_matrix, state_gpu, dim);
    }
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    // Check for any errors launching the kernel
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    if (d_matrix != NULL) cudaFree(d_matrix);
    free(sorted_insert_index_list);
    free(matrix_mask_list);
    state = reinterpret_cast<void*>(state_gpu);
}
// n_qubit <= 10: diagonal gate whose entries live in constant memory.
__global__ void multi_qubit_diagonal_matrix_gate_gpu(
    GTYPE* state_gpu, ITYPE dim) {
    const ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dim) return;
    // Scale each amplitude by its diagonal entry.
    state_gpu[idx] = cuCmul(matrix_const_gpu[idx], state_gpu[idx]);
}
// n_qubit > 10
__global__ void multi_qubit_diagonal_matrix_gate_gpu(
    GTYPE* matrix_gpu, GTYPE* state_gpu, ITYPE dim) {
    // One thread per amplitude: state[i] = diag[i] * state[i], with the
    // diagonal supplied through a global-memory buffer (large-dim path).
    const ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dim) return;  // guard the grid tail
    state_gpu[idx] = cuCmul(matrix_gpu[idx], state_gpu[idx]);
}
// Applies a diagonal gate whose diagonal fits in the file-scope constant
// buffer matrix_const_gpu. The dispatcher only routes here when dim <= 1024,
// so the single launch below covers the whole state vector with one block.
__host__ void multi_qubit_diagonal_matrix_gate_with_constant_memory_host(
    const CPPCTYPE* diagonal_matrix, void* state, ITYPE dim, void* stream,
    UINT device_number) {
    int current_device = get_current_device();
    if (device_number != current_device) cudaSetDevice(device_number);
    GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
    cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
    // Upload the diagonal into constant memory.
    // NOTE(review): this cudaMemcpyToSymbol uses the default stream while the
    // kernel runs on *cuda_stream -- presumably safe because the copy blocks
    // the host, but confirm if non-blocking streams are ever passed in.
    checkCudaErrors(cudaMemcpyToSymbol(
        matrix_const_gpu, diagonal_matrix, sizeof(GTYPE) * dim),
        __FILE__, __LINE__);
    unsigned int block = dim <= 1024 ? dim : 1024;
    unsigned int grid = dim / block;  // dim <= 1024 on this path, so grid == 1
    multi_qubit_diagonal_matrix_gate_gpu<<<grid, block, 0, *cuda_stream>>>(
        state_gpu, dim);
    checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    // Writes only the local copy of the pointer argument (no caller effect).
    state = reinterpret_cast<void*>(state_gpu);
}
// Applies a diagonal gate whose diagonal is too large for the constant-memory
// path: the diagonal is staged in a temporary global-memory buffer, the
// element-wise multiply kernel is launched on the caller's stream, and the
// temporary is released after synchronization.
__host__ void multi_qubit_diagonal_matrix_gate_with_global_memory_host(
    const CPPCTYPE* diagonal_matrix, void* state, ITYPE dim, void* stream,
    UINT device_number) {
    if (device_number != get_current_device()) cudaSetDevice(device_number);
    GTYPE* d_state = reinterpret_cast<GTYPE*>(state);
    cudaStream_t* stream_ptr = reinterpret_cast<cudaStream_t*>(stream);
    // Stage the diagonal on the device.
    GTYPE* d_diag;
    checkCudaErrors(
        cudaMalloc((void**)&d_diag, sizeof(GTYPE) * dim), __FILE__, __LINE__);
    checkCudaErrors(
        cudaMemcpyAsync(d_diag, diagonal_matrix, sizeof(GTYPE) * dim,
            cudaMemcpyHostToDevice, *stream_ptr),
        __FILE__, __LINE__);
    // Full-size blocks; dim is a power of two, so the division is exact.
    unsigned int block;
    if (dim <= 1024) {
        block = dim;
    } else {
        block = 1024;
    }
    unsigned int grid = dim / block;
    multi_qubit_diagonal_matrix_gate_gpu<<<grid, block, 0, *stream_ptr>>>(
        d_diag, d_state, dim);
    checkCudaErrors(cudaStreamSynchronize(*stream_ptr), __FILE__, __LINE__);
    checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
    cudaFree(d_diag);
    state = reinterpret_cast<void*>(d_state);
}
// Dispatcher for the diagonal-matrix gate: small state vectors go through
// constant memory, larger ones through a temporary global-memory buffer.
__host__ void multi_qubit_diagonal_matrix_gate_host(
    const CPPCTYPE* diagonal_matrix, void* state, ITYPE dim, void* stream,
    UINT device_number) {
    const bool use_constant_memory = (dim <= 1024);
    if (use_constant_memory) {
        multi_qubit_diagonal_matrix_gate_with_constant_memory_host(
            diagonal_matrix, state, dim, stream, device_number);
        return;
    }
    multi_qubit_diagonal_matrix_gate_with_global_memory_host(
        diagonal_matrix, state, dim, stream, device_number);
}
|
b09dc04735e631fa3c0b1acb6d8b5fe719596844.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
/* Single-thread kernel: stores the sum of the two device scalars into *c. */
__global__ void add(int *a, int *b, int *c)
{
    c[0] = a[0] + b[0];
}
/* Minimal HIP sanity test: adds two scalars on the device and verifies the
   result on the host. c starts at -99 so a kernel that never writes d_c is
   detected as FAIL. */
int main()
{
    int a, b, c;
    int *d_a, *d_b, *d_c;  /* device copies of a, b, c */
    int size = sizeof( int );
    /* get GPU device number and name */
    int dev;
    hipDeviceProp_t deviceProp;
    checkCUDA( hipGetDevice( &dev ) );
    checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
    printf("Using GPU %d: %s\n", dev, deviceProp.name );
    /* allocate space for device copies of a, b, c */
    checkCUDA( hipMalloc( (void **) &d_a, size ) );
    checkCUDA( hipMalloc( (void **) &d_b, size ) );
    checkCUDA( hipMalloc( (void **) &d_c, size ) );
    /* setup initial values */
    a = 2;
    b = 7;
    c = -99;  /* sentinel: only overwritten if the copy-back succeeds */
    /* copy inputs to device */
    checkCUDA( hipMemcpy( d_a, &a, size, hipMemcpyHostToDevice ) );
    checkCUDA( hipMemcpy( d_b, &b, size, hipMemcpyHostToDevice ) );
    /* launch the kernel on the GPU; one thread suffices for scalars */
    hipLaunchKernelGGL(( add), dim3(1), dim3(1) , 0, 0,  d_a, d_b, d_c );
    checkKERNEL()  /* macro from ../debug.h; note: no trailing semicolon --
                      presumably the macro supplies its own statement */
    /* copy result back to host; the blocking memcpy also waits for the kernel */
    checkCUDA( hipMemcpy( &c, d_c, size, hipMemcpyDeviceToHost ) );
    printf("value of c after kernel is %d\n",c);
    if( c == ( a + b ) ) printf("PASS\n");
    else printf("FAIL\n");
    /* clean up */
    checkCUDA( hipFree( d_a ) );
    checkCUDA( hipFree( d_b ) );
    checkCUDA( hipFree( d_c ) );
    checkCUDA( hipDeviceReset() );
    return 0;
} /* end main */
| b09dc04735e631fa3c0b1acb6d8b5fe719596844.cu | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
/* Single-thread kernel: stores the sum of the two device scalars into *c. */
__global__ void add(int *a, int *b, int *c)
{
    c[0] = a[0] + b[0];
}
/* Minimal CUDA sanity test: adds two scalars on the device and verifies the
   result on the host. c starts at -99 so a kernel that never writes d_c is
   detected as FAIL. */
int main()
{
    int a, b, c;
    int *d_a, *d_b, *d_c;  /* device copies of a, b, c */
    int size = sizeof( int );
    /* get GPU device number and name */
    int dev;
    cudaDeviceProp deviceProp;
    checkCUDA( cudaGetDevice( &dev ) );
    checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
    printf("Using GPU %d: %s\n", dev, deviceProp.name );
    /* allocate space for device copies of a, b, c */
    checkCUDA( cudaMalloc( (void **) &d_a, size ) );
    checkCUDA( cudaMalloc( (void **) &d_b, size ) );
    checkCUDA( cudaMalloc( (void **) &d_c, size ) );
    /* setup initial values */
    a = 2;
    b = 7;
    c = -99;  /* sentinel: only overwritten if the copy-back succeeds */
    /* copy inputs to device */
    checkCUDA( cudaMemcpy( d_a, &a, size, cudaMemcpyHostToDevice ) );
    checkCUDA( cudaMemcpy( d_b, &b, size, cudaMemcpyHostToDevice ) );
    /* launch the kernel on the GPU; one thread suffices for scalars */
    add<<< 1, 1 >>>( d_a, d_b, d_c );
    checkKERNEL()  /* macro from ../debug.h; note: no trailing semicolon --
                      presumably the macro supplies its own statement */
    /* copy result back to host; the blocking memcpy also waits for the kernel */
    checkCUDA( cudaMemcpy( &c, d_c, size, cudaMemcpyDeviceToHost ) );
    printf("value of c after kernel is %d\n",c);
    if( c == ( a + b ) ) printf("PASS\n");
    else printf("FAIL\n");
    /* clean up */
    checkCUDA( cudaFree( d_a ) );
    checkCUDA( cudaFree( d_b ) );
    checkCUDA( cudaFree( d_c ) );
    checkCUDA( cudaDeviceReset() );
    return 0;
} /* end main */
|
7b0edcea765c93fd2a8e6a3fa20b98e5cf402ada.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
/* Fills A with a shifted ring: A[i] = (i + stride) mod size, so that
   repeatedly doing j = A[j] walks the array with the given stride and
   wraps back to the beginning. */
void init_cpu_data(int* A, int size, int stride){
    for (int idx = 0; idx < size; idx++) {
        A[idx] = (idx + stride) % size;
    }
}
/*
__device__ void cache_warmup(int *A, int iterations, int *B){
int j = 0;
for (int it =0; it < iterations; it ++){
j = A[j];
}
B[0] = j;
}
*/
//////////min page size 4kb = 4096b = 32 * 128.
// Pointer-chasing latency probe: performs `iterations` dependent loads
// j = A[j] starting at `starting_index`, then prints the elapsed time.
// clock_rate is the SM clock in kHz (see main), so cycles / clock_rate
// comes out in milliseconds. `mark` only tags the printf so runs can be
// told apart.
__device__ void P_chasing(int mark, int *A, int iterations, int *B, int starting_index, float clock_rate){
    int j = starting_index;  // current position in the chase
    long long int start_time = 0;
    long long int end_time = 0;
    start_time = clock64();  // raw SM cycle counter
    for (int it =0; it < iterations; it ++){
        j = A[j];  // each load depends on the previous one (no overlap)
    }
    end_time=clock64();
    long long int total_time = end_time - start_time;
    printf("inside%d:%fms\n", mark, total_time / (float)clock_rate);
    B[0] = j;  // publish the final index so the loop cannot be optimized away
}
// Driver kernel (launched <<<1,1>>> from main): probes load latency at
// offsets chosen to hit/miss cache lines within one page, then at far
// pages to force TLB misses. Each probe does i == 4 dependent loads.
// NOTE(review): the `iterations` parameter is unused here; the per-probe
// count is the local i -- confirm this is intentional.
__global__ void tlb_latency_test(int *A, int iterations, int *B, float clock_rate){
    int i = 4;  // dependent loads per probe
    long long int start_time = 0;///////////clock
    long long int end_time = 0;///////////clock
    start_time = clock64();///////////clock
    P_chasing(0, A, i, B, 0 * 32, clock_rate);/////TLB and cache warmup
    P_chasing(1, A, i, B, 0 * 32 + 6, clock_rate);/////make them in the same page, and hit near in cache lines
    P_chasing(2, A, i, B, 0 * 32 + 7, clock_rate);/////make them in the same page, and hit near in cache lines
    P_chasing(3, A, i, B, 0 * 32 + 8, clock_rate);/////make them in the same page, and hit near in cache lines
    P_chasing(4, A, i, B, 0 * 32 + 14, clock_rate);/////////////make them in the same page, and hit far in cache lines
    P_chasing(5, A, i, B, 0 * 32 + 15, clock_rate);////////////make them in the same page, and hit far in cache lines
    P_chasing(6, A, i, B, 0 * 32 + 16, clock_rate);////////////make them in the same page, and hit far in cache lines
    P_chasing(7, A, i, B, 1 * 32, clock_rate);/////make them in the same page, and miss near in cache lines
    P_chasing(8, A, i, B, 2 * 32, clock_rate);/////make them in the same page, and miss near in cache lines
    P_chasing(9, A, i, B, 3 * 32, clock_rate);/////make them in the same page, and miss near in cache lines
    P_chasing(10, A, i, B, 8 * 32, clock_rate);//////////////make them in the same page, and miss far in cache lines
    P_chasing(11, A, i, B, 16 * 32, clock_rate);/////////////make them in the same page, and miss far in cache lines
    P_chasing(12, A, i, B, 24 * 32, clock_rate);/////////////make them in the same page, and miss far in cache lines
    P_chasing(13, A, i, B, 16 * 524288, clock_rate);//////////////TLB miss, 17th page
    P_chasing(14, A, i, B, 32 * 524288, clock_rate);/////////////TLB miss, 33rd page
    P_chasing(15, A, i, B, 48 * 524288, clock_rate);/////////////TLB miss, 49th page
    end_time=clock64();///////////clock
    long long int total_time = end_time - start_time;///////////clock
    printf("outside1:%fms\n", total_time / (float)clock_rate);///////////clock
}
// Host driver for the TLB/cache latency microbenchmark: builds a pointer-
// chasing ring on the CPU where each element jumps one 2MB page ahead,
// uploads it, and launches tlb_latency_test with a single thread so the
// timings are not disturbed by other warps.
int main(int argc, char **argv)
{
    printf("\n");
    // set device
    hipDeviceProp_t device_prop;
    //int dev_id = findCudaDevice(argc, (const char **) argv);
    int dev_id = 0;
    checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
    int peak_clk = 1;//kHz -- used on-device to convert cycles to milliseconds
    checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
    float clock_rate = (float) peak_clk;
    //printf("clock_rate_out_kernel:%f\n", clock_rate);
    if (!device_prop.managedMemory) {
        // This samples requires being run on a device that supports Unified Memory
        fprintf(stderr, "Unified Memory not supported on this device\n");
        exit(EXIT_WAIVED);
    }
    if (device_prop.computeMode == hipComputeModeProhibited)
    {
        // This sample requires being run with a default or process exclusive mode
        fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
        exit(EXIT_WAIVED);
    }
    ///////////////////////////////////////////////////////////////////CPU data begin
    int iterations = 100;
    ////////size(int) = 4, 32 = 128b, 256 = 1kb, 32 * 32 = 1024 = 4kb, 262144 = 1mb, 16384 * 32 = 512 * 1024 = 524288 = 2mb.
    int data_stride = 524288;/////2mb. Pointing to the next page.
    //int data_size = 524288000;/////1000 * 2mb. ##### size = iteration * stride. ##### This can support 1000 iteration. The 1001st iteration starts from head again.
    int data_size = iterations * data_stride;/////size = iteration * stride = 100 2mb pages.
    int *CPU_data_in;
    CPU_data_in = (int*)malloc(sizeof(int) * data_size);
    //int *CPU_data_out;
    //CPU_data_out = (int*)malloc(data_size * sizeof(int));
    init_cpu_data(CPU_data_in, data_size, data_stride);
    ///////////////////////////////////////////////////////////////////CPU data end
    ///////////////////////////////////////////////////////////////////GPU data begin
    int *GPU_data_in;
    //////checkCudaErrors(hipMallocManaged(&data, sizeof(int) * data_size));
    checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
    int *GPU_data_out;
    checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 1));
    hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
    ///////////////////////////////////////////////////////////////////GPU data end
    hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, clock_rate);//////////////////////////////////////////////kernel is here
    //hipMemcpy(CPU_data_out, GPU_data_out, sizeof(int) * data_size, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();  // wait for the benchmark (and its printf) to finish
    checkCudaErrors(hipFree(GPU_data_in));
    checkCudaErrors(hipFree(GPU_data_out));
    free(CPU_data_in);
    //free(CPU_data_out);
    exit(EXIT_SUCCESS);
}
| 7b0edcea765c93fd2a8e6a3fa20b98e5cf402ada.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
/* Fills A with a shifted ring: A[i] = (i + stride) mod size, so that
   repeatedly doing j = A[j] walks the array with the given stride and
   wraps back to the beginning. */
void init_cpu_data(int* A, int size, int stride){
    for (int idx = 0; idx < size; idx++) {
        A[idx] = (idx + stride) % size;
    }
}
/*
__device__ void cache_warmup(int *A, int iterations, int *B){
int j = 0;
for (int it =0; it < iterations; it ++){
j = A[j];
}
B[0] = j;
}
*/
//////////min page size 4kb = 4096b = 32 * 128.
// Pointer-chasing latency probe: performs `iterations` dependent loads
// j = A[j] starting at `starting_index`, then prints the elapsed time.
// clock_rate is the SM clock in kHz (see main), so cycles / clock_rate
// comes out in milliseconds. `mark` only tags the printf so runs can be
// told apart.
__device__ void P_chasing(int mark, int *A, int iterations, int *B, int starting_index, float clock_rate){
    int j = starting_index;  // current position in the chase
    long long int start_time = 0;
    long long int end_time = 0;
    start_time = clock64();  // raw SM cycle counter
    for (int it =0; it < iterations; it ++){
        j = A[j];  // each load depends on the previous one (no overlap)
    }
    end_time=clock64();
    long long int total_time = end_time - start_time;
    printf("inside%d:%fms\n", mark, total_time / (float)clock_rate);
    B[0] = j;  // publish the final index so the loop cannot be optimized away
}
// Driver kernel (launched <<<1,1>>> from main): probes load latency at
// offsets chosen to hit/miss cache lines within one page, then at far
// pages to force TLB misses. Each probe does i == 4 dependent loads.
// NOTE(review): the `iterations` parameter is unused here; the per-probe
// count is the local i -- confirm this is intentional.
__global__ void tlb_latency_test(int *A, int iterations, int *B, float clock_rate){
    int i = 4;  // dependent loads per probe
    long long int start_time = 0;///////////clock
    long long int end_time = 0;///////////clock
    start_time = clock64();///////////clock
    P_chasing(0, A, i, B, 0 * 32, clock_rate);/////TLB and cache warmup
    P_chasing(1, A, i, B, 0 * 32 + 6, clock_rate);/////make them in the same page, and hit near in cache lines
    P_chasing(2, A, i, B, 0 * 32 + 7, clock_rate);/////make them in the same page, and hit near in cache lines
    P_chasing(3, A, i, B, 0 * 32 + 8, clock_rate);/////make them in the same page, and hit near in cache lines
    P_chasing(4, A, i, B, 0 * 32 + 14, clock_rate);/////////////make them in the same page, and hit far in cache lines
    P_chasing(5, A, i, B, 0 * 32 + 15, clock_rate);////////////make them in the same page, and hit far in cache lines
    P_chasing(6, A, i, B, 0 * 32 + 16, clock_rate);////////////make them in the same page, and hit far in cache lines
    P_chasing(7, A, i, B, 1 * 32, clock_rate);/////make them in the same page, and miss near in cache lines
    P_chasing(8, A, i, B, 2 * 32, clock_rate);/////make them in the same page, and miss near in cache lines
    P_chasing(9, A, i, B, 3 * 32, clock_rate);/////make them in the same page, and miss near in cache lines
    P_chasing(10, A, i, B, 8 * 32, clock_rate);//////////////make them in the same page, and miss far in cache lines
    P_chasing(11, A, i, B, 16 * 32, clock_rate);/////////////make them in the same page, and miss far in cache lines
    P_chasing(12, A, i, B, 24 * 32, clock_rate);/////////////make them in the same page, and miss far in cache lines
    P_chasing(13, A, i, B, 16 * 524288, clock_rate);//////////////TLB miss, 17th page
    P_chasing(14, A, i, B, 32 * 524288, clock_rate);/////////////TLB miss, 33rd page
    P_chasing(15, A, i, B, 48 * 524288, clock_rate);/////////////TLB miss, 49th page
    end_time=clock64();///////////clock
    long long int total_time = end_time - start_time;///////////clock
    printf("outside1:%fms\n", total_time / (float)clock_rate);///////////clock
}
// Host driver for the TLB/cache latency microbenchmark: builds a pointer-
// chasing ring on the CPU where each element jumps one 2MB page ahead,
// uploads it, and launches tlb_latency_test with a single thread so the
// timings are not disturbed by other warps.
int main(int argc, char **argv)
{
    printf("\n");
    // set device
    cudaDeviceProp device_prop;
    //int dev_id = findCudaDevice(argc, (const char **) argv);
    int dev_id = 0;
    checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
    int peak_clk = 1;//kHz -- used on-device to convert cycles to milliseconds
    checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
    float clock_rate = (float) peak_clk;
    //printf("clock_rate_out_kernel:%f\n", clock_rate);
    if (!device_prop.managedMemory) {
        // This samples requires being run on a device that supports Unified Memory
        fprintf(stderr, "Unified Memory not supported on this device\n");
        exit(EXIT_WAIVED);
    }
    if (device_prop.computeMode == cudaComputeModeProhibited)
    {
        // This sample requires being run with a default or process exclusive mode
        fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
        exit(EXIT_WAIVED);
    }
    ///////////////////////////////////////////////////////////////////CPU data begin
    int iterations = 100;
    ////////size(int) = 4, 32 = 128b, 256 = 1kb, 32 * 32 = 1024 = 4kb, 262144 = 1mb, 16384 * 32 = 512 * 1024 = 524288 = 2mb.
    int data_stride = 524288;/////2mb. Pointing to the next page.
    //int data_size = 524288000;/////1000 * 2mb. ##### size = iteration * stride. ##### This can support 1000 iteration. The 1001st iteration starts from head again.
    int data_size = iterations * data_stride;/////size = iteration * stride = 100 2mb pages.
    int *CPU_data_in;
    CPU_data_in = (int*)malloc(sizeof(int) * data_size);
    //int *CPU_data_out;
    //CPU_data_out = (int*)malloc(data_size * sizeof(int));
    init_cpu_data(CPU_data_in, data_size, data_stride);
    ///////////////////////////////////////////////////////////////////CPU data end
    ///////////////////////////////////////////////////////////////////GPU data begin
    int *GPU_data_in;
    //////checkCudaErrors(cudaMallocManaged(&data, sizeof(int) * data_size));
    checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
    int *GPU_data_out;
    checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 1));
    cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
    ///////////////////////////////////////////////////////////////////GPU data end
    tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, clock_rate);//////////////////////////////////////////////kernel is here
    //cudaMemcpy(CPU_data_out, GPU_data_out, sizeof(int) * data_size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();  // wait for the benchmark (and its printf) to finish
    checkCudaErrors(cudaFree(GPU_data_in));
    checkCudaErrors(cudaFree(GPU_data_out));
    free(CPU_data_in);
    //free(CPU_data_out);
    exit(EXIT_SUCCESS);
}
|
9bc14db5d494bd235a52490249e56c34b1e047d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <kernelized_correlation_filters_gpu/discrete_fourier_transform_kernel.h>
// Widens a real-valued device buffer into complex form: out[i] = (in[i], 0).
// One element per thread over a 2D grid.
__global__
void cuFloatToComplexKernel(hipfftComplex *d_complex,
                            const float *dev_data, const int lenght) {
    const int gx = threadIdx.x + blockIdx.x * blockDim.x;
    const int gy = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = gx + gy * blockDim.x * gridDim.x;
    if (idx >= lenght) {
        return;  // threads past the data do nothing
    }
    d_complex[idx].x = dev_data[idx];
    d_complex[idx].y = 0.0f;
}
/**
 * Allocates a device buffer of FILTER_BATCH * FILTER_SIZE complex values and
 * fills it from the real-valued device array dev_data (imaginary parts = 0).
 * The caller owns (and must hipFree) the returned pointer.
 * NOTE(review): a zero-sized request only prints an error and then falls
 * through to a zero-element allocation/launch -- confirm callers never pass 0.
 */
hipfftComplex* convertFloatToComplexGPU(const float *dev_data,
                                        const int FILTER_BATCH,
                                        const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [convertFloatToComplexGPU] FAILED\n");
    }
    int LENGHT = FILTER_BATCH * FILTER_SIZE;
    const int BYTE = LENGHT * sizeof(hipfftComplex);
    hipfftComplex *d_complex;
    hipMalloc(reinterpret_cast<void**>(&d_complex), BYTE);
    // Launch a roughly square 2D grid of GRID_SIZE x GRID_SIZE blocks that
    // covers LENGHT elements.
    const int dimension = ::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    hipLaunchKernelGGL(( cuFloatToComplexKernel), dim3(grid_size), dim3(block_size), 0, 0,
        d_complex, dev_data, LENGHT);
    return d_complex;
}
/**
* memory reuse for tx1
*/
// hipfftComplex*
/**
 * Buffer-reusing variant (TX1 memory-reuse port): fills the caller-provided
 * *d_complex -- already allocated with FILTER_BATCH * FILTER_SIZE elements --
 * from dev_data, zeroing the imaginary parts.
 * Returns false (after printing) on a zero-sized request, true otherwise.
 */
bool convertFloatToComplexGPU(hipfftComplex **d_complex,
                              const float *dev_data,
                              const int FILTER_BATCH,
                              const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [convertFloatToComplexGPU] FAILED\n");
        return false;
    }
    int LENGHT = FILTER_BATCH * FILTER_SIZE;
    // Allocation intentionally skipped: the buffer is owned by the caller.
    // const int BYTE = LENGHT * sizeof(hipfftComplex);
    // hipfftComplex *d_complex;
    // hipMalloc(reinterpret_cast<void**>(&d_complex), BYTE);
    const int dimension = ::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    hipLaunchKernelGGL(( cuFloatToComplexKernel), dim3(grid_size), dim3(block_size), 0, 0,
        *d_complex, dev_data, LENGHT);
    return true;
}
// Extracts the real component of each complex element: out[i] = in[i].x.
// One element per thread over a 2D grid.
__global__
void copyComplexRealToFloatKernel(float *d_output,
                                  const hipfftComplex *d_complex,
                                  const int lenght) {
    const int gx = threadIdx.x + blockIdx.x * blockDim.x;
    const int gy = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = gx + gy * blockDim.x * gridDim.x;
    if (idx >= lenght) {
        return;  // threads past the data do nothing
    }
    d_output[idx] = d_complex[idx].x;
}
/**
 * Copies the real parts of d_complex into a freshly hipMalloc'd float array
 * of FILTER_BATCH * FILTER_SIZE elements; the caller owns the result.
 * NOTE(review): a zero-sized request only prints an error and falls through
 * -- confirm callers never pass 0.
 */
float* copyComplexRealToFloatGPU(const hipfftComplex* d_complex,
                                 const int FILTER_BATCH,
                                 const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [copyComplexRealToFloatGPU] FAILED\n");
    }
    int LENGHT = FILTER_SIZE * FILTER_BATCH;
    int BYTE = LENGHT * sizeof(float);
    float *d_output;
    hipMalloc(reinterpret_cast<void**>(&d_output), BYTE);
    // Roughly square 2D grid covering LENGHT elements.
    const int dimension = ::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    hipLaunchKernelGGL(( copyComplexRealToFloatKernel), dim3(grid_size), dim3(block_size), 0, 0,
        d_output, d_complex, LENGHT);
    return d_output;
}
/**
* memeory reusing for tx1
*/
/**
 * Buffer-reusing variant (TX1 memory-reuse port): writes the real parts of
 * d_complex into the caller-provided *d_output, which must already hold
 * FILTER_BATCH * FILTER_SIZE floats.
 * Returns false (after printing) on a zero-sized request, true otherwise.
 */
bool copyComplexRealToFloatGPU(float **d_output,
                               const hipfftComplex* d_complex,
                               const int FILTER_BATCH,
                               const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [copyComplexRealToFloatGPU] FAILED\n");
        return false;
    }
    int LENGHT = FILTER_SIZE * FILTER_BATCH;
    // Allocation intentionally skipped: the buffer is owned by the caller.
    // int BYTE = LENGHT * sizeof(float);
    // float *d_output;
    // hipMalloc(reinterpret_cast<void**>(&d_output), BYTE);
    const int dimension = ::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    hipLaunchKernelGGL(( copyComplexRealToFloatKernel), dim3(grid_size), dim3(block_size), 0, 0,
        *d_output, d_complex, LENGHT);
    return true;
}
//! normalize the data array but a given factor
// Divides every element of d_data by `factor` in place; one element per
// thread over a 2D grid.
__global__
void normalizeByFactorKernel(float *d_data,
                             const float factor,
                             const int lenght) {
    const int gx = threadIdx.x + blockIdx.x * blockDim.x;
    const int gy = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = gx + gy * blockDim.x * gridDim.x;
    if (idx < lenght) {
        d_data[idx] = d_data[idx] / factor;
    }
}
/**
 * Host wrapper: divides all FILTER_BATCH * FILTER_SIZE elements of the
 * device array d_data by `factor` in place (launch on the default stream).
 * NOTE(review): a zero-sized request only prints an error and falls
 * through -- confirm callers never pass 0.
 */
void normalizeByFactorGPU(float *&d_data,
                          const float factor,
                          const int FILTER_BATCH,
                          const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [normalizeByFactorGPU] FAILED\n");
    }
    int LENGHT = FILTER_BATCH * FILTER_SIZE;
    // (removed an unused BYTE local that computed LENGHT * sizeof(float))
    const int dimension = ::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    hipLaunchKernelGGL(( normalizeByFactorKernel), dim3(grid_size), dim3(block_size), 0, 0,
        d_data, factor, LENGHT);
}
//! normalize the data array A but a value in the array A
// Reads d_data[factor_index] into *d_value so the host can fetch it.
// Intended to be launched <<<1,1>>> (see normalizeByFactorInArrayGPU);
// every thread that passes the guard writes the same address.
// The printf is a debug trace of the fetched factor.
__global__
void getNormalizationFactorFromIndex(float *d_value,
                                     const float *d_data,
                                     const int factor_index,
                                     const int lenght) {
    int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
    int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = t_idx + t_idy * blockDim.x * gridDim.x;
    if (offset < lenght) {
        *d_value = d_data[factor_index];
        printf("info: %d %3.4f\n", factor_index, *d_value);
    }
}
/**
 * Divides every element of d_data by the value currently stored at
 * d_data[factor_index]. The factor is fetched with a single-thread kernel,
 * copied back to the host, and then broadcast into normalizeByFactorKernel.
 * NOTE(review): the two hipDeviceSynchronize round-trips serialize the
 * device; fine for a one-off normalization, costly inside a hot loop.
 */
void normalizeByFactorInArrayGPU(float *&d_data,
                                 const int factor_index,
                                 const int FILTER_BATCH,
                                 const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0 || factor_index < 0) {
        printf("\033[31m ERROR: [normalizeByFactorInArrayGPU] FAILED\n");
    }
    int LENGHT = FILTER_BATCH * FILTER_SIZE;
    const int dimension = ::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    float *d_factor;
    hipMalloc(reinterpret_cast<void**>(&d_factor), sizeof(float));
    hipLaunchKernelGGL(( getNormalizationFactorFromIndex), dim3(1), dim3(1), 0, 0,
        d_factor, d_data, factor_index, 1);
    hipDeviceSynchronize();
    float factor;
    hipMemcpy(&factor, d_factor, sizeof(float), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( normalizeByFactorKernel), dim3(grid_size), dim3(block_size), 0, 0,
        d_data, factor, LENGHT);
    hipFree(d_factor);
}
//! fast fourier transformation
/**
 * Runs a forward C2C FFT of in_data into a newly allocated device buffer of
 * FILTER_SIZE * FILTER_BATCH elements; the caller owns the result.
 * Exits the process on FFT failure.
 * NOTE(review): the status is compared against hipSuccess although
 * hipfftExecC2C returns a hipfftResult; this only works because both
 * success codes are 0 -- HIPFFT_SUCCESS is the matching constant.
 */
hipfftComplex* cuFFTC2Cprocess(hipfftComplex *in_data,
                               const hipfftHandle handle,
                               const int FILTER_SIZE,
                               const int FILTER_BATCH) {
    const int OUT_BYTE = FILTER_SIZE * FILTER_BATCH * sizeof(hipfftComplex);
    hipfftResult cufft_status;
    hipfftComplex *d_output;
    hipMalloc(reinterpret_cast<void**>(&d_output), OUT_BYTE);
    cufft_status = hipfftExecC2C(handle, in_data, d_output, HIPFFT_FORWARD);
    if (cufft_status != hipSuccess) {
        printf("[cuFFTC2Cprocess]: hipfftExecC2C failed!");
        std::exit(-1); //! change to shutdown
    }
    return d_output;
}
/**
* memory resuse for tx1
*/
/**
 * Buffer-reusing forward C2C FFT (TX1 memory-reuse port): transforms
 * in_data into the caller-provided *d_output, which must already hold
 * FILTER_SIZE * FILTER_BATCH complex elements.
 * Returns false when the FFT execution fails.
 */
bool cuFFTC2Cprocess(hipfftComplex **d_output, hipfftComplex *in_data,
                     const hipfftHandle handle, const int FILTER_SIZE,
                     const int FILTER_BATCH) {
    // (removed an unused OUT_BYTE local left over from the allocating variant)
    hipfftResult cufft_status;
    cufft_status = hipfftExecC2C(handle, in_data, *d_output, HIPFFT_FORWARD);
    // Compare against the hipfftResult success constant; the old comparison
    // with hipSuccess only worked because both success codes are 0.
    if (cufft_status != HIPFFT_SUCCESS) {
        printf("[cuFFTC2Cprocess]: hipfftExecC2C failed!");
        return false;
    }
    return true;
}
/**
 * Runs an in-place inverse C2C FFT on d_complex and extracts the real parts
 * into a freshly allocated device float array owned by the caller.
 * When is_normalize is set, the (unscaled) inverse transform is divided by
 * the transform length FILTER_SIZE.
 * Returns NULL on a zero-sized request.
 */
float *invcuFFTC2CProcess(hipfftComplex *d_complex,
                          const hipfftHandle handle,
                          const int FILTER_SIZE,
                          const int FILTER_BATCH, bool is_normalize) {
    if (FILTER_SIZE == 0 || FILTER_BATCH == 0) {
        printf("\033[31m ERROR: [invcuFFTC2CProcess]: INPUTS = 0 \033[0m\n");
        // Bug fix: the original returned a pointer to a local stack array,
        // which dangles as soon as the function returns. Return NULL so the
        // failure is detectable instead of undefined behavior.
        return NULL;
    }
    hipfftResult cufft_status = hipfftExecC2C(handle, d_complex,
        d_complex, HIPFFT_BACKWARD);
    // HIPFFT_SUCCESS is the matching hipfftResult constant (the previous
    // comparison with hipSuccess relied on both being 0).
    if (cufft_status != HIPFFT_SUCCESS) {
        printf("inverse hipfftExecC2C failed!\n");
    }
    float *d_real = copyComplexRealToFloatGPU(d_complex,
                                              FILTER_BATCH,
                                              FILTER_SIZE);
    if (is_normalize) {
        // Inverse FFTs are unscaled; divide by the transform length.
        float factor = FILTER_SIZE;
        normalizeByFactorGPU(d_real, factor, FILTER_BATCH, FILTER_SIZE);
    }
    return d_real;
}
/**
* memory reusing for tx1
*/
/**
 * Buffer-reusing variant (TX1 memory-reuse port): in-place inverse C2C FFT
 * on d_complex, real parts written into the caller-provided *d_real, with
 * optional division by FILTER_SIZE (inverse FFTs are unscaled).
 * Returns false (after printing) on a zero-sized request, true otherwise.
 * NOTE(review): the status is compared against hipSuccess although
 * hipfftExecC2C returns a hipfftResult; works only because both success
 * codes are 0 -- HIPFFT_SUCCESS is the matching constant.
 */
bool invcuFFTC2CProcess(float **d_real,
                        hipfftComplex *d_complex,
                        const hipfftHandle handle,
                        const int FILTER_SIZE,
                        const int FILTER_BATCH, bool is_normalize) {
    if (FILTER_SIZE == 0 || FILTER_BATCH == 0) {
        printf("\033[31m ERROR: [invcuFFTC2CProcess]: INPUTS = 0 \033[0m\n");
        return false;
    }
    hipfftResult cufft_status = hipfftExecC2C(handle, d_complex,
        d_complex, HIPFFT_BACKWARD);
    if (cufft_status != hipSuccess) {
        printf("inverse hipfftExecC2C failed!\n");
    }
    copyComplexRealToFloatGPU(d_real, d_complex, FILTER_BATCH,
        FILTER_SIZE);
    if (is_normalize) {
        float factor = FILTER_SIZE;
        normalizeByFactorGPU(*d_real, factor, FILTER_BATCH, FILTER_SIZE);
    }
    return true;
}
| 9bc14db5d494bd235a52490249e56c34b1e047d2.cu |
#include <kernelized_correlation_filters_gpu/discrete_fourier_transform_kernel.h>
// Widens a real-valued device buffer into complex form: out[i] = (in[i], 0).
// One element per thread over a 2D grid.
__global__
void cuFloatToComplexKernel(cufftComplex *d_complex,
                            const float *dev_data, const int lenght) {
    const int gx = threadIdx.x + blockIdx.x * blockDim.x;
    const int gy = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = gx + gy * blockDim.x * gridDim.x;
    if (idx >= lenght) {
        return;  // threads past the data do nothing
    }
    d_complex[idx].x = dev_data[idx];
    d_complex[idx].y = 0.0f;
}
/**
 * Allocates a device buffer of FILTER_BATCH * FILTER_SIZE complex values and
 * fills it from the real-valued device array dev_data (imaginary parts = 0).
 * The caller owns (and must cudaFree) the returned pointer.
 * NOTE(review): a zero-sized request only prints an error and then falls
 * through to a zero-element allocation/launch -- confirm callers never pass 0.
 */
cufftComplex* convertFloatToComplexGPU(const float *dev_data,
                                       const int FILTER_BATCH,
                                       const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [convertFloatToComplexGPU] FAILED\n");
    }
    int LENGHT = FILTER_BATCH * FILTER_SIZE;
    const int BYTE = LENGHT * sizeof(cufftComplex);
    cufftComplex *d_complex;
    cudaMalloc(reinterpret_cast<void**>(&d_complex), BYTE);
    // Launch a roughly square 2D grid of GRID_SIZE x GRID_SIZE blocks that
    // covers LENGHT elements.
    const int dimension = std::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    cuFloatToComplexKernel<<<grid_size, block_size>>>(
        d_complex, dev_data, LENGHT);
    return d_complex;
}
/**
* memory reuse for tx1
*/
// cufftComplex*
/**
 * Buffer-reusing variant (TX1 memory-reuse port): fills the caller-provided
 * *d_complex -- already allocated with FILTER_BATCH * FILTER_SIZE elements --
 * from dev_data, zeroing the imaginary parts.
 * Returns false (after printing) on a zero-sized request, true otherwise.
 */
bool convertFloatToComplexGPU(cufftComplex **d_complex,
                              const float *dev_data,
                              const int FILTER_BATCH,
                              const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [convertFloatToComplexGPU] FAILED\n");
        return false;
    }
    int LENGHT = FILTER_BATCH * FILTER_SIZE;
    // Allocation intentionally skipped: the buffer is owned by the caller.
    // const int BYTE = LENGHT * sizeof(cufftComplex);
    // cufftComplex *d_complex;
    // cudaMalloc(reinterpret_cast<void**>(&d_complex), BYTE);
    const int dimension = std::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    cuFloatToComplexKernel<<<grid_size, block_size>>>(
        *d_complex, dev_data, LENGHT);
    return true;
}
// Extracts the real component of each complex element: out[i] = in[i].x.
// One element per thread over a 2D grid.
__global__
void copyComplexRealToFloatKernel(float *d_output,
                                  const cufftComplex *d_complex,
                                  const int lenght) {
    const int gx = threadIdx.x + blockIdx.x * blockDim.x;
    const int gy = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = gx + gy * blockDim.x * gridDim.x;
    if (idx >= lenght) {
        return;  // threads past the data do nothing
    }
    d_output[idx] = d_complex[idx].x;
}
/**
 * Copies the real parts of d_complex into a freshly cudaMalloc'd float array
 * of FILTER_BATCH * FILTER_SIZE elements; the caller owns the result.
 * NOTE(review): a zero-sized request only prints an error and falls through
 * -- confirm callers never pass 0.
 */
float* copyComplexRealToFloatGPU(const cufftComplex* d_complex,
                                 const int FILTER_BATCH,
                                 const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [copyComplexRealToFloatGPU] FAILED\n");
    }
    int LENGHT = FILTER_SIZE * FILTER_BATCH;
    int BYTE = LENGHT * sizeof(float);
    float *d_output;
    cudaMalloc(reinterpret_cast<void**>(&d_output), BYTE);
    // Roughly square 2D grid covering LENGHT elements.
    const int dimension = std::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    copyComplexRealToFloatKernel<<<grid_size, block_size>>>(
        d_output, d_complex, LENGHT);
    return d_output;
}
/**
* memeory reusing for tx1
*/
/**
 * Buffer-reusing variant (TX1 memory-reuse port): writes the real parts of
 * d_complex into the caller-provided *d_output, which must already hold
 * FILTER_BATCH * FILTER_SIZE floats.
 * Returns false (after printing) on a zero-sized request, true otherwise.
 */
bool copyComplexRealToFloatGPU(float **d_output,
                               const cufftComplex* d_complex,
                               const int FILTER_BATCH,
                               const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [copyComplexRealToFloatGPU] FAILED\n");
        return false;
    }
    int LENGHT = FILTER_SIZE * FILTER_BATCH;
    // Allocation intentionally skipped: the buffer is owned by the caller.
    // int BYTE = LENGHT * sizeof(float);
    // float *d_output;
    // cudaMalloc(reinterpret_cast<void**>(&d_output), BYTE);
    const int dimension = std::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    copyComplexRealToFloatKernel<<<grid_size, block_size>>>(
        *d_output, d_complex, LENGHT);
    return true;
}
//! normalize the data array but a given factor
// Divides every element of d_data by `factor` in place; one element per
// thread over a 2D grid.
__global__
void normalizeByFactorKernel(float *d_data,
                             const float factor,
                             const int lenght) {
    const int gx = threadIdx.x + blockIdx.x * blockDim.x;
    const int gy = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = gx + gy * blockDim.x * gridDim.x;
    if (idx < lenght) {
        d_data[idx] = d_data[idx] / factor;
    }
}
/**
 * Host wrapper: divides all FILTER_BATCH * FILTER_SIZE elements of the
 * device array d_data by `factor` in place (launch on the default stream).
 * NOTE(review): a zero-sized request only prints an error and falls
 * through -- confirm callers never pass 0.
 */
void normalizeByFactorGPU(float *&d_data,
                          const float factor,
                          const int FILTER_BATCH,
                          const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0) {
        printf("\033[31m ERROR: [normalizeByFactorGPU] FAILED\n");
    }
    int LENGHT = FILTER_BATCH * FILTER_SIZE;
    // (removed an unused BYTE local that computed LENGHT * sizeof(float))
    const int dimension = std::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    normalizeByFactorKernel<<<grid_size, block_size>>>(
        d_data, factor, LENGHT);
}
//! normalize the data array A but a value in the array A
// Reads d_data[factor_index] into *d_value so the host can fetch it.
// Intended to be launched <<<1,1>>> (see normalizeByFactorInArrayGPU);
// every thread that passes the guard writes the same address.
// The printf is a debug trace of the fetched factor.
__global__
void getNormalizationFactorFromIndex(float *d_value,
                                     const float *d_data,
                                     const int factor_index,
                                     const int lenght) {
    int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
    int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = t_idx + t_idy * blockDim.x * gridDim.x;
    if (offset < lenght) {
        *d_value = d_data[factor_index];
        printf("info: %d %3.4f\n", factor_index, *d_value);
    }
}
/**
 * Divides every element of d_data by the value currently stored at
 * d_data[factor_index]. The factor is fetched with a single-thread kernel,
 * copied back to the host, and then broadcast into normalizeByFactorKernel.
 * NOTE(review): the two cudaDeviceSynchronize round-trips serialize the
 * device; fine for a one-off normalization, costly inside a hot loop.
 */
void normalizeByFactorInArrayGPU(float *&d_data,
                                 const int factor_index,
                                 const int FILTER_BATCH,
                                 const int FILTER_SIZE) {
    if (FILTER_BATCH == 0 || FILTER_SIZE == 0 || factor_index < 0) {
        printf("\033[31m ERROR: [normalizeByFactorInArrayGPU] FAILED\n");
    }
    int LENGHT = FILTER_BATCH * FILTER_SIZE;
    const int dimension = std::ceil(std::sqrt(LENGHT));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE),
                   cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    float *d_factor;
    cudaMalloc(reinterpret_cast<void**>(&d_factor), sizeof(float));
    getNormalizationFactorFromIndex<<<1, 1>>>(
        d_factor, d_data, factor_index, 1);
    cudaDeviceSynchronize();
    float factor;
    cudaMemcpy(&factor, d_factor, sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    normalizeByFactorKernel<<<grid_size, block_size>>>(
        d_data, factor, LENGHT);
    cudaFree(d_factor);
}
//! fast fourier transformation
//! Forward complex-to-complex FFT of FILTER_BATCH signals of FILTER_SIZE
//! samples. Allocates and returns a fresh device buffer holding the
//! spectrum; the caller owns (and must cudaFree) the returned pointer.
cufftComplex* cuFFTC2Cprocess(cufftComplex *in_data,
                              const cufftHandle handle,
                              const int FILTER_SIZE,
                              const int FILTER_BATCH) {
    const int OUT_BYTE = FILTER_SIZE * FILTER_BATCH * sizeof(cufftComplex);
    cufftComplex *d_output;
    cudaMalloc(reinterpret_cast<void**>(&d_output), OUT_BYTE);
    // bug fix: cufftExecC2C returns a cufftResult, so compare against
    // CUFFT_SUCCESS (the old code compared against cudaSuccess, a
    // different enum that only happens to share the value 0)
    const cufftResult cufft_status =
        cufftExecC2C(handle, in_data, d_output, CUFFT_FORWARD);
    if (cufft_status != CUFFT_SUCCESS) {
       printf("[cuFFTC2Cprocess]: cufftExecC2C failed!");
       cudaFree(d_output);  // don't leak the output buffer on failure
       std::exit(-1);  //! change to shutdown
    }
    return d_output;
}
/**
* memory resuse for tx1
*/
/**
 * Forward C2C FFT into a caller-provided device buffer (*d_output);
 * memory-reuse variant for TX1. Returns false on cuFFT failure.
 */
bool cuFFTC2Cprocess(cufftComplex **d_output, cufftComplex *in_data,
                     const cufftHandle handle, const int FILTER_SIZE,
                     const int FILTER_BATCH) {
    // bug fix: compare against CUFFT_SUCCESS, not cudaSuccess — the
    // status is a cufftResult, a different enum
    const cufftResult cufft_status =
        cufftExecC2C(handle, in_data, *d_output, CUFFT_FORWARD);
    if (cufft_status != CUFFT_SUCCESS) {
       printf("[cuFFTC2Cprocess]: cufftExecC2C failed!");
       return false;
    }
    return true;
}
/**
 * Inverse C2C FFT of d_complex (in place), then extraction of the real
 * part into a freshly allocated device float buffer which is returned
 * (caller frees). When is_normalize is set the output is divided by
 * FILTER_SIZE, since cuFFT's inverse transform is unnormalized.
 * Returns nullptr on invalid input sizes.
 */
float *invcuFFTC2CProcess(cufftComplex *d_complex,
                          const cufftHandle handle,
                          const int FILTER_SIZE,
                          const int FILTER_BATCH, bool is_normalize) {
    if (FILTER_SIZE == 0 || FILTER_BATCH == 0) {
       printf("\033[31m ERROR: [invcuFFTC2CProcess]: INPUTS = 0 \033[0m\n");
       // bug fix: the old code returned a pointer to a local stack array
       // here (undefined behaviour); report failure with nullptr instead
       return nullptr;
    }
    // bug fix: status is a cufftResult — compare against CUFFT_SUCCESS,
    // not cudaSuccess
    const cufftResult cufft_status = cufftExecC2C(handle, d_complex,
                                                  d_complex, CUFFT_INVERSE);
    if (cufft_status != CUFFT_SUCCESS) {
       printf("inverse cufftExecC2C failed!\n");
    }
    float *d_real = copyComplexRealToFloatGPU(d_complex,
                                              FILTER_BATCH,
                                              FILTER_SIZE);
    if (is_normalize) {
       float factor = FILTER_SIZE;  // cuFFT inverse is unnormalized
       normalizeByFactorGPU(d_real, factor, FILTER_BATCH, FILTER_SIZE);
    }
    return d_real;
}
/**
* memory reusing for tx1
*/
/**
 * In-place inverse C2C FFT with the real part written into *d_real
 * (memory-reuse variant for TX1). When is_normalize is set the output is
 * divided by FILTER_SIZE (cuFFT's inverse is unnormalized).
 * Returns false on invalid sizes or cuFFT failure.
 */
bool invcuFFTC2CProcess(float **d_real,
                        cufftComplex *d_complex,
                        const cufftHandle handle,
                        const int FILTER_SIZE,
                        const int FILTER_BATCH, bool is_normalize) {
    if (FILTER_SIZE == 0 || FILTER_BATCH == 0) {
       printf("\033[31m ERROR: [invcuFFTC2CProcess]: INPUTS = 0 \033[0m\n");
       return false;
    }
    // bug fix: status is a cufftResult — compare against CUFFT_SUCCESS,
    // not cudaSuccess
    const cufftResult cufft_status = cufftExecC2C(handle, d_complex,
                                                  d_complex, CUFFT_INVERSE);
    if (cufft_status != CUFFT_SUCCESS) {
       printf("inverse cufftExecC2C failed!\n");
       return false;  // don't copy/normalize garbage data
    }
    copyComplexRealToFloatGPU(d_real, d_complex, FILTER_BATCH,
                              FILTER_SIZE);
    if (is_normalize) {
       float factor = FILTER_SIZE;
       normalizeByFactorGPU(*d_real, factor, FILTER_BATCH, FILTER_SIZE);
    }
    return true;
}
|
b865086859dda274d3d3fd559e241017001e621f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstring>
#include <cudnn.h>
#include "common.h"
/**
* Performs a convolution (really cross-correlation) on the CPU.
*/
// CPU reference convolution (really cross-correlation), NCHW layout,
// no padding:
//   y[b][co][oh][ow] = sum over ci,kh,kw of
//       w[co][ci][kh][kw] * x[b][ci][oh*stride_h+kh][ow*stride_w+kw]
void ConvolutionHost(float *x, int batch_size, int c_in, int h_in, int w_in,
    float *w, int c_out, int kernel_h, int kernel_w, int stride_h, int stride_w,
    float *y) {
  int h_out = (h_in - kernel_h) / stride_h + 1;
  int w_out = (w_in - kernel_w) / stride_w + 1;
  for (int b = 0; b < batch_size; ++b) {
    for (int co = 0; co < c_out; ++co) {
      for (int oh = 0; oh < h_out; ++oh) {
        for (int ow = 0; ow < w_out; ++ow) {
          // accumulate this output pixel locally, then store once
          float acc = 0.f;
          for (int ci = 0; ci < c_in; ++ci) {
            for (int kh = 0; kh < kernel_h; ++kh) {
              for (int kw = 0; kw < kernel_w; ++kw) {
                acc += w[((co*c_in + ci)*kernel_h + kh)*kernel_w + kw] *
                       x[((b*c_in + ci)*h_in + (oh*stride_h + kh))*w_in +
                         (ow*stride_w + kw)];
              }
            }
          }
          y[((b*c_out + co)*h_out + oh)*w_out + ow] = acc;
        }
      }
    }
  }
}
// Naive GPU convolution kernel. One block per (batch element, output
// channel) pair: blockIdx.x selects the datum, blockIdx.y the feature
// map. The 2-D thread block strides over the output pixels. Accumulates
// with +=, so y must be zeroed before launch (see ConvolutionDevice).
__global__ void ConvolutionKernel(float *x, int batch_size, int c_in, int h_in, int w_in,
    float *w, int c_out, int kernel_h, int kernel_w, int stride_h, int stride_w,
    float *y, int h_out, int w_out) {
  int i = blockIdx.x;  // datum index
  int j = blockIdx.y;  // feature map index
  for (int k = threadIdx.y; k < h_out; k += blockDim.y) {
    for (int l = threadIdx.x; l < w_out; l += blockDim.x) {
      // Compute the convolution for this output pixel
      for (int m = 0; m < c_in; ++m) {
        for (int n = 0; n < kernel_h; ++n) {
          for (int o = 0; o < kernel_w; ++o) {
            y[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l] +=
              w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w + n*kernel_w + o] *
              x[i*c_in*h_in*w_in + m*h_in*w_in + ((k*stride_h)+n)*w_in + ((l*stride_w)+o)];
          }
        }
      }
    }
  }
}
// Host wrapper for the custom kernel: zero the output asynchronously on
// `stream` (the kernel accumulates into y), then launch one block per
// (batch element, output channel) with a 32x32 thread block striding
// over the output pixels.
void ConvolutionDevice(float *x, int batch_size, int c_in, int h_in, int w_in,
    float *w, int c_out, int kernel_h, int kernel_w, int stride_h, int stride_w,
    float *y, hipStream_t stream) {
  int h_out = (h_in - kernel_h) / stride_h + 1;
  int w_out = (w_in - kernel_w) / stride_w + 1;
  CUDA_CALL(hipMemsetAsync(y, 0, batch_size*c_out*h_out*w_out*sizeof(float), stream));
  hipLaunchKernelGGL(( ConvolutionKernel), dim3(dim3(batch_size, c_out)), dim3(dim3(32, 32)), 0, stream, x,
      batch_size, c_in, h_in, w_in, w, c_out, kernel_h, kernel_w, stride_h,
      stride_w, y, h_out, w_out);
}
// End-to-end demo: run the same 3x3 convolution three ways (cuDNN, CPU
// reference, hand-written kernel), time each, and cross-check results.
int main() {
  // cuDNN handle bound to a dedicated stream
  cudnnHandle_t handle;
  CUDNN_CALL(cudnnCreate(&handle));
  hipStream_t stream;
  CUDA_CALL(hipStreamCreate(&stream));
  CUDNN_CALL(cudnnSetStream(handle, stream));
  //
  /// Set dimensions for the convolution
  //
  // Kernel dims - we don't support padding in this example
  int kernel_h = 3;
  int kernel_w = 3;
  int pad_h = 0;
  int pad_w = 0;
  int stride_h = 1;
  int stride_w = 1;
  // Input dims
  int n = 32;
  int h_in = 227;
  int w_in = 227;
  int c_in = 3;
  // Output dims
  int h_out = (h_in + 2*pad_h - kernel_h) / stride_h + 1;
  int w_out = (w_in + 2*pad_w - kernel_w) / stride_w + 1;
  int c_out = 32;
  //
  /// Setup data & filters for the convolution
  //
  int filter_size = c_out*c_in*kernel_h*kernel_w;
  int images_size = n*c_in*h_in*w_in;
  int output_size = n*c_out*h_out*w_out;
  float *filters = new float[filter_size];
  float *images = new float[images_size];
  float *output = new float[output_size];
  SetIncremental(filters, filter_size);
  SetIncremental(images, images_size);
#ifdef DEBUG
  cout << "Images: ";
  PrintTensor(images, n, c_in, h_in, w_in);
  cout << "Filters: ";
  PrintTensor(filters, c_out, c_in, kernel_h, kernel_w);
#endif
  // Setup device version of input, output, and filters
  float *filters_on_device, *images_on_device, *output_on_device;
  CUDA_CALL(hipMalloc(&filters_on_device, filter_size*sizeof(float)));
  CUDA_CALL(hipMalloc(&images_on_device, images_size*sizeof(float)));
  CUDA_CALL(hipMalloc(&output_on_device, output_size*sizeof(float)));
  CUDA_CALL(hipMemcpy(
    filters_on_device, filters,
    filter_size*sizeof(float),
    hipMemcpyHostToDevice
  ));
  CUDA_CALL(hipMemcpy(
    images_on_device,
    images, images_size*sizeof(float),
    hipMemcpyHostToDevice
  ));
  //
  /// Setup parameters for cudnn call
  //
  // Setup alpha/beta
  float alpha = 1.f, beta = 0.f;
  // Setup input/output tensor descriptors
  cudnnTensorDescriptor_t x_desc, y_desc;
  CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
  CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc));
  CUDNN_CALL(cudnnSetTensor4dDescriptor(x_desc,
    CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    n, c_in, h_in, w_in));
  CUDNN_CALL(cudnnSetTensor4dDescriptor(y_desc,
    CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    n, c_out, h_out, w_out));
  // Setup filter descriptor
  cudnnFilterDescriptor_t w_desc;
  CUDNN_CALL(cudnnCreateFilterDescriptor(&w_desc));
  CUDNN_CALL(cudnnSetFilter4dDescriptor(w_desc,
    CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
    c_out, c_in, kernel_h, kernel_w));
  // Setup convolution meta-data
  cudnnConvolutionDescriptor_t conv_desc;
  cudnnConvolutionFwdAlgo_t conv_algo = CUDNN_CONVOLUTION_FWD_ALGO_GEMM;
  CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
  CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc,
    pad_h, pad_w, stride_h, stride_w, 1, 1,
    CUDNN_CROSS_CORRELATION,
    CUDNN_DATA_FLOAT));
  // Setup & allocate workspace
  size_t workspace_size = 0;
  void *workspace_on_device;
  CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(handle, x_desc,
    w_desc, conv_desc, y_desc, conv_algo, &workspace_size));
  CUDA_CALL(hipMalloc(&workspace_on_device, workspace_size));
  // Run the convolution
  auto t1 = GetTime();
  CUDNN_CALL(cudnnConvolutionForward(
    handle,
    &alpha,
    x_desc,
    images_on_device,
    w_desc,
    filters_on_device,
    conv_desc,
    conv_algo,
    workspace_on_device,
    workspace_size,
    &beta,
    y_desc,
    output_on_device
  ));
  CUDA_CALL(hipStreamSynchronize(stream));
  float total_seconds = ElapsedTime(t1, GetTime());
  cout << "CUDNN FPS: " << n / total_seconds << endl;
#ifdef DEBUG
  // NOTE(review): output_on_device is a device pointer — verify that
  // PrintTensor copies it to the host before reading.
  cout << "Device Output: ";
  PrintTensor(output_on_device, n, c_out, h_out, w_out);
#endif
  // Do the host-side convolution
  t1 = GetTime();
  ConvolutionHost(
    images,
    n,
    c_in,
    h_in,
    w_in,
    filters,
    c_out,
    kernel_h,
    kernel_w,
    stride_h,
    stride_w,
    output);
  total_seconds = ElapsedTime(t1, GetTime());
  cout << "Host FPS: " << n / total_seconds << endl;
#ifdef DEBUG
  cout << "Output: ";
  PrintTensor(output, n, c_out, h_out, w_out);
#endif
  // Verify the results
  VerifyResults(output, output_on_device, n*c_out*h_out*w_out);
  // Run the device convolution
  t1 = GetTime();
  ConvolutionDevice(
    images_on_device,
    n,
    c_in,
    h_in,
    w_in,
    filters_on_device,
    c_out,
    kernel_h,
    kernel_w,
    stride_h,
    stride_w,
    output_on_device,
    stream
  );
  CUDA_CALL(hipStreamSynchronize(stream));
  total_seconds = ElapsedTime(t1, GetTime());
  cout << "Device FPS: " << n / total_seconds << endl;
  // Verify the results
  VerifyResults(output, output_on_device, n*c_out*h_out*w_out);
  // clean up
  // NOTE(review): the stream and the cuDNN tensor/filter/convolution
  // descriptors are never destroyed (only the handle is) — add
  // cudnnDestroy*Descriptor and hipStreamDestroy calls here.
  CUDA_CALL(hipFree(workspace_on_device));
  CUDA_CALL(hipFree(filters_on_device));
  CUDA_CALL(hipFree(images_on_device));
  CUDA_CALL(hipFree(output_on_device));
  delete[] filters;
  delete[] images;
  delete[] output;
  CUDNN_CALL(cudnnDestroy(handle));
}
| b865086859dda274d3d3fd559e241017001e621f.cu | #include <cmath>
#include <cstring>
#include <cudnn.h>
#include "common.h"
/**
* Performs a convolution (really cross-correlation) on the CPU.
*/
// CPU reference convolution (really cross-correlation), NCHW layout,
// no padding:
//   y[b][co][oh][ow] = sum over ci,kh,kw of
//       w[co][ci][kh][kw] * x[b][ci][oh*stride_h+kh][ow*stride_w+kw]
void ConvolutionHost(float *x, int batch_size, int c_in, int h_in, int w_in,
    float *w, int c_out, int kernel_h, int kernel_w, int stride_h, int stride_w,
    float *y) {
  int h_out = (h_in - kernel_h) / stride_h + 1;
  int w_out = (w_in - kernel_w) / stride_w + 1;
  for (int b = 0; b < batch_size; ++b) {
    for (int co = 0; co < c_out; ++co) {
      for (int oh = 0; oh < h_out; ++oh) {
        for (int ow = 0; ow < w_out; ++ow) {
          // accumulate this output pixel locally, then store once
          float acc = 0.f;
          for (int ci = 0; ci < c_in; ++ci) {
            for (int kh = 0; kh < kernel_h; ++kh) {
              for (int kw = 0; kw < kernel_w; ++kw) {
                acc += w[((co*c_in + ci)*kernel_h + kh)*kernel_w + kw] *
                       x[((b*c_in + ci)*h_in + (oh*stride_h + kh))*w_in +
                         (ow*stride_w + kw)];
              }
            }
          }
          y[((b*c_out + co)*h_out + oh)*w_out + ow] = acc;
        }
      }
    }
  }
}
// Naive GPU convolution kernel. One block per (batch element, output
// channel) pair: blockIdx.x selects the datum, blockIdx.y the feature
// map. The 2-D thread block strides over the output pixels. Accumulates
// with +=, so y must be zeroed before launch (see ConvolutionDevice).
__global__ void ConvolutionKernel(float *x, int batch_size, int c_in, int h_in, int w_in,
    float *w, int c_out, int kernel_h, int kernel_w, int stride_h, int stride_w,
    float *y, int h_out, int w_out) {
  int i = blockIdx.x;  // datum index
  int j = blockIdx.y;  // feature map index
  for (int k = threadIdx.y; k < h_out; k += blockDim.y) {
    for (int l = threadIdx.x; l < w_out; l += blockDim.x) {
      // Compute the convolution for this output pixel
      for (int m = 0; m < c_in; ++m) {
        for (int n = 0; n < kernel_h; ++n) {
          for (int o = 0; o < kernel_w; ++o) {
            y[i*c_out*h_out*w_out + j*h_out*w_out + k*w_out + l] +=
              w[j*c_in*kernel_h*kernel_w + m*kernel_h*kernel_w + n*kernel_w + o] *
              x[i*c_in*h_in*w_in + m*h_in*w_in + ((k*stride_h)+n)*w_in + ((l*stride_w)+o)];
          }
        }
      }
    }
  }
}
// Host wrapper for the custom kernel: zero the output asynchronously on
// `stream` (the kernel accumulates into y), then launch one block per
// (batch element, output channel) with a 32x32 thread block striding
// over the output pixels.
void ConvolutionDevice(float *x, int batch_size, int c_in, int h_in, int w_in,
    float *w, int c_out, int kernel_h, int kernel_w, int stride_h, int stride_w,
    float *y, cudaStream_t stream) {
  int h_out = (h_in - kernel_h) / stride_h + 1;
  int w_out = (w_in - kernel_w) / stride_w + 1;
  CUDA_CALL(cudaMemsetAsync(y, 0, batch_size*c_out*h_out*w_out*sizeof(float), stream));
  ConvolutionKernel<<<dim3(batch_size, c_out), dim3(32, 32), 0, stream>>>(x,
      batch_size, c_in, h_in, w_in, w, c_out, kernel_h, kernel_w, stride_h,
      stride_w, y, h_out, w_out);
  // Kernel launches do not return an error code; surface launch-
  // configuration errors here instead of at the next synchronizing call.
  CUDA_CALL(cudaGetLastError());
}
// End-to-end demo: run the same 3x3 convolution three ways (cuDNN, CPU
// reference, hand-written kernel), time each, and cross-check results.
// Fix over the original: the stream and the cuDNN tensor/filter/
// convolution descriptors are now destroyed at the end (they were leaked).
int main() {
  // cuDNN handle bound to a dedicated stream
  cudnnHandle_t handle;
  CUDNN_CALL(cudnnCreate(&handle));
  cudaStream_t stream;
  CUDA_CALL(cudaStreamCreate(&stream));
  CUDNN_CALL(cudnnSetStream(handle, stream));
  //
  /// Set dimensions for the convolution
  //
  // Kernel dims - we don't support padding in this example
  int kernel_h = 3;
  int kernel_w = 3;
  int pad_h = 0;
  int pad_w = 0;
  int stride_h = 1;
  int stride_w = 1;
  // Input dims
  int n = 32;
  int h_in = 227;
  int w_in = 227;
  int c_in = 3;
  // Output dims
  int h_out = (h_in + 2*pad_h - kernel_h) / stride_h + 1;
  int w_out = (w_in + 2*pad_w - kernel_w) / stride_w + 1;
  int c_out = 32;
  //
  /// Setup data & filters for the convolution
  //
  int filter_size = c_out*c_in*kernel_h*kernel_w;
  int images_size = n*c_in*h_in*w_in;
  int output_size = n*c_out*h_out*w_out;
  float *filters = new float[filter_size];
  float *images = new float[images_size];
  float *output = new float[output_size];
  SetIncremental(filters, filter_size);
  SetIncremental(images, images_size);
#ifdef DEBUG
  cout << "Images: ";
  PrintTensor(images, n, c_in, h_in, w_in);
  cout << "Filters: ";
  PrintTensor(filters, c_out, c_in, kernel_h, kernel_w);
#endif
  // Setup device version of input, output, and filters
  float *filters_on_device, *images_on_device, *output_on_device;
  CUDA_CALL(cudaMalloc(&filters_on_device, filter_size*sizeof(float)));
  CUDA_CALL(cudaMalloc(&images_on_device, images_size*sizeof(float)));
  CUDA_CALL(cudaMalloc(&output_on_device, output_size*sizeof(float)));
  CUDA_CALL(cudaMemcpy(
    filters_on_device, filters,
    filter_size*sizeof(float),
    cudaMemcpyHostToDevice
  ));
  CUDA_CALL(cudaMemcpy(
    images_on_device,
    images, images_size*sizeof(float),
    cudaMemcpyHostToDevice
  ));
  //
  /// Setup parameters for cudnn call
  //
  // Setup alpha/beta
  float alpha = 1.f, beta = 0.f;
  // Setup input/output tensor descriptors
  cudnnTensorDescriptor_t x_desc, y_desc;
  CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
  CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc));
  CUDNN_CALL(cudnnSetTensor4dDescriptor(x_desc,
    CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    n, c_in, h_in, w_in));
  CUDNN_CALL(cudnnSetTensor4dDescriptor(y_desc,
    CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    n, c_out, h_out, w_out));
  // Setup filter descriptor
  cudnnFilterDescriptor_t w_desc;
  CUDNN_CALL(cudnnCreateFilterDescriptor(&w_desc));
  CUDNN_CALL(cudnnSetFilter4dDescriptor(w_desc,
    CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
    c_out, c_in, kernel_h, kernel_w));
  // Setup convolution meta-data
  cudnnConvolutionDescriptor_t conv_desc;
  cudnnConvolutionFwdAlgo_t conv_algo = CUDNN_CONVOLUTION_FWD_ALGO_GEMM;
  CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
  CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc,
    pad_h, pad_w, stride_h, stride_w, 1, 1,
    CUDNN_CROSS_CORRELATION,
    CUDNN_DATA_FLOAT));
  // Setup & allocate workspace
  size_t workspace_size = 0;
  void *workspace_on_device;
  CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(handle, x_desc,
    w_desc, conv_desc, y_desc, conv_algo, &workspace_size));
  CUDA_CALL(cudaMalloc(&workspace_on_device, workspace_size));
  // Run the cuDNN convolution and time it
  auto t1 = GetTime();
  CUDNN_CALL(cudnnConvolutionForward(
    handle,
    &alpha,
    x_desc,
    images_on_device,
    w_desc,
    filters_on_device,
    conv_desc,
    conv_algo,
    workspace_on_device,
    workspace_size,
    &beta,
    y_desc,
    output_on_device
  ));
  CUDA_CALL(cudaStreamSynchronize(stream));
  float total_seconds = ElapsedTime(t1, GetTime());
  cout << "CUDNN FPS: " << n / total_seconds << endl;
#ifdef DEBUG
  // NOTE(review): output_on_device is a device pointer — verify that
  // PrintTensor copies it to the host before reading.
  cout << "Device Output: ";
  PrintTensor(output_on_device, n, c_out, h_out, w_out);
#endif
  // Do the host-side convolution
  t1 = GetTime();
  ConvolutionHost(
    images,
    n,
    c_in,
    h_in,
    w_in,
    filters,
    c_out,
    kernel_h,
    kernel_w,
    stride_h,
    stride_w,
    output);
  total_seconds = ElapsedTime(t1, GetTime());
  cout << "Host FPS: " << n / total_seconds << endl;
#ifdef DEBUG
  cout << "Output: ";
  PrintTensor(output, n, c_out, h_out, w_out);
#endif
  // Verify the results
  VerifyResults(output, output_on_device, n*c_out*h_out*w_out);
  // Run the device convolution
  t1 = GetTime();
  ConvolutionDevice(
    images_on_device,
    n,
    c_in,
    h_in,
    w_in,
    filters_on_device,
    c_out,
    kernel_h,
    kernel_w,
    stride_h,
    stride_w,
    output_on_device,
    stream
  );
  CUDA_CALL(cudaStreamSynchronize(stream));
  total_seconds = ElapsedTime(t1, GetTime());
  cout << "Device FPS: " << n / total_seconds << endl;
  // Verify the results
  VerifyResults(output, output_on_device, n*c_out*h_out*w_out);
  // clean up: device buffers, host buffers, then cuDNN/stream objects
  CUDA_CALL(cudaFree(workspace_on_device));
  CUDA_CALL(cudaFree(filters_on_device));
  CUDA_CALL(cudaFree(images_on_device));
  CUDA_CALL(cudaFree(output_on_device));
  delete[] filters;
  delete[] images;
  delete[] output;
  // previously leaked: descriptors and the stream were never destroyed
  CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc));
  CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc));
  CUDNN_CALL(cudnnDestroyFilterDescriptor(w_desc));
  CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc));
  CUDNN_CALL(cudnnDestroy(handle));
  CUDA_CALL(cudaStreamDestroy(stream));
}
|
d7ab02a2a1c3be5995b1a2fad420f97d47c7d45d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <chrono>
#include <sstream>
#include <string>
#include <ctime>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<algorithm>
#include <vector>
#define d 0.85
#define epsilon 0.00000001
#include <time.h>
using namespace std;
using namespace std::chrono;
time_t debut,fin;
// One PageRank power-iteration step: state <- state * M, computed in
// place. Each thread owns one output component.
// Fixes over the original:
//  - `resultat` was uninitialized, seeding every dot product with garbage;
//  - the old state was overwritten while other threads were still reading
//    it — a block-wide barrier now separates the read and write phases
//    (sufficient here: with the host launch <<<4,4>>> only block 0 has
//    active threads);
//  - the matrix column is now selected with the global index instead of
//    threadIdx.x, so multi-block launches index correctly too.
__global__ void mul(float *matrice, float *state, int maxval)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float resultat = 0.0f;
    if (idx < maxval)
    {
        for (int i = 0; i < maxval; i++)
        {
            resultat += state[i] * matrice[maxval * i + idx];
        }
    }
    // barrier is outside the divergent branch, so every thread of the
    // block reaches it: all reads of the old state finish before any write
    __syncthreads();
    if (idx < maxval)
    {
        state[idx] = resultat;
    }
}
// PageRank demo: parse an edge list ("from to" per line), build the
// column-stochastic link matrix, then power-iterate state <- state * M on
// the GPU until the squared update falls below epsilon.
int main(){
  // NOTE(review): n is a hard-coded graph size — everything below (host
  // matrix, state arrays, device buffers, kernel launch) assumes the
  // parsed graph has at most n nodes.
  const int n = 4;
  // Open the txt file so we can parse it
  ifstream graph;
  char chemin[500];
  int max_iter;
  cout << "Entrer le chemin entier avec des doubles backslashs"<< endl;
  cin >> chemin;
  cout << "\nCombien d iterations maximale ?";
  cin >> max_iter;
  high_resolution_clock::time_point t1 = high_resolution_clock::now();
  time(&debut);
  graph.open(chemin);
  vector<int> from;
  vector<int> to;
  string word,line;
  // Parse the from/to columns around the separator
  if (!graph.is_open())
  {
    cout<<"Ouverture impossible";
    return false;
  }
  while (getline(graph,line))
  {
    string espace;
    std::istringstream text_stream(line);
    text_stream >> word;
    from.push_back(stoi(word));
    getline(text_stream,espace,' ');
    text_stream >> word;
    to.push_back(stoi(word));
  }
  graph.close();
  cout << "Ouverture du fichier texte reussi" << endl;
  // compute the maximum node id to size the square matrix
  double maxiFrom = *max_element(from.begin(), from.end());
  double maxiTo = *max_element(to.begin(), to.end());
  int maxval;
  if (maxiFrom < maxiTo){
    maxval = maxiTo+1;
  }
  else
  {
    maxval = maxiFrom+1;
  }
  // NOTE(review): only n*n = 16 floats are allocated, but the loops below
  // write maxval*maxval entries — heap overflow whenever the input graph
  // contains a node id > 3. The allocation should be sized by maxval.
  float* matrice = new float[n*n];
  for (int i = 0;i<maxval*maxval;i++)
  {
    matrice[i]=0;
  }
  int idx_i,idx_j;
  // NOTE(review): assumes the file holds exactly maxval*2 - 1 edges —
  // confirm against the expected input format.
  for (int i =0;i<maxval*2 -1;i++)
  {
    idx_i = from[i];
    idx_j = to[i];
    matrice[(idx_j)*maxval+idx_i]=1;
  }
  float sum;
  // Normalize the matrix to account for the number of linking pages
  for (int i = 0;i<maxval;i++)
  {
    sum = 0;
    for (int j =0;j<maxval;j++)
    {
      if (matrice[i*maxval+j]>0)
      {
        sum++;
      }
    }
    // dangling node: link it uniformly to every page
    if (sum ==0)
    {
      sum = maxval;
      for(int j = 0;j<maxval;j++)
      {
        matrice[i*maxval+j]=1;
      }
    }
    for(int j = 0;j<maxval;j++)
    {
      matrice[i*maxval+j]/=sum;
    }
  }
  // Start of the PageRank algorithm
  float maxval2 = maxval;
  float state[n];
  float old[n];
  float delta[n];
  //float temp[n];
  // damping: teleport probability spread over all pages
  float alpha =((1-d)/maxval);
  float* d_matrix;
  float* d_state;
  for (int i = 0;i<maxval;i++)
  {
    state[i] = 1/maxval2;
  }
  // fold damping factor d and teleport term into the matrix
  for (int i =0;i<maxval*maxval;i++)
  {
    matrice[i]=matrice[i]*d+alpha;
  }
  hipMalloc((void**)&d_matrix,n*n*sizeof(float));
  hipMalloc((void**)&d_state,n*sizeof(float));
  hipMemcpy(d_matrix,matrice,n*n*sizeof(float),hipMemcpyHostToDevice);
  hipMemcpy(d_state,state,n*sizeof(float),hipMemcpyHostToDevice);
  dim3 dimGrid(1,1);
  dim3 dimBlock(n,n);
  // power iteration: stop at max_iter or when ||state - old||^2 < epsilon
  for (int i = 0;i<=max_iter;i++)
  {
    float check = 0;
    for (int j = 0;j<maxval;j++)
    {
      old[j] = state[j];
    }
    hipLaunchKernelGGL(( mul), dim3(4),dim3(4), 0, 0, d_matrix,d_state,n);
    hipDeviceSynchronize();
    hipMemcpy(state,d_state,n*sizeof(float),hipMemcpyDeviceToHost);
    for (int h = 0;h<maxval;h++)
    {
      delta[h] = state[h] - old[h];
    }
    for (int h = 0;h<maxval;h++)
    {
      check+=delta[h]*delta[h];
    }
    if (check < epsilon)
    {
      cout << "On quitte la boucle apres "<< i<< " iterations.\n";
      break;
    }
  }
  // output: write the final rank vector, one value per line
  ofstream out;
  out.open("C:\\Users\\Prugniaud\\projects\\output.txt");
  for(int i =0;i<maxval;i++)
  {
    out << to_string(state[i]);
    out<< '\n';
  }
  out.close();
  high_resolution_clock::time_point t2 = high_resolution_clock::now();
  duration<double, std::milli> time_span = t2 - t1;
  time(&fin);
  hipFree(d_matrix);
  hipFree(d_state);
  cout << "Temps d execution : "<<difftime(fin,debut)<<" secondes"<<endl;
  cout << "Temps d execution : "<<time_span.count()<<" millisecondes"<<endl;
  cout << "Fin du programme, Appuyez sur Entrer pour quitter" << endl;
  getchar();
  return 0;
}
| d7ab02a2a1c3be5995b1a2fad420f97d47c7d45d.cu | #include <iostream>
#include <fstream>
#include <chrono>
#include <sstream>
#include <string>
#include <ctime>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<algorithm>
#include <vector>
#define d 0.85
#define epsilon 0.00000001
#include <time.h>
using namespace std;
using namespace std::chrono;
time_t debut,fin;
// One PageRank power-iteration step: state <- state * M, computed in
// place. Each thread owns one output component.
// Fixes over the original:
//  - `resultat` was uninitialized, seeding every dot product with garbage;
//  - the old state was overwritten while other threads were still reading
//    it — a block-wide barrier now separates the read and write phases
//    (sufficient here: with the host launch <<<4,4>>> only block 0 has
//    active threads);
//  - the matrix column is now selected with the global index instead of
//    threadIdx.x, so multi-block launches index correctly too.
__global__ void mul(float *matrice, float *state, int maxval)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float resultat = 0.0f;
    if (idx < maxval)
    {
        for (int i = 0; i < maxval; i++)
        {
            resultat += state[i] * matrice[maxval * i + idx];
        }
    }
    // barrier is outside the divergent branch, so every thread of the
    // block reaches it: all reads of the old state finish before any write
    __syncthreads();
    if (idx < maxval)
    {
        state[idx] = resultat;
    }
}
// PageRank demo: parse an edge list ("from to" per line), build the
// column-stochastic link matrix, then power-iterate state <- state * M on
// the GPU until the squared update falls below epsilon.
int main(){
  // NOTE(review): n is a hard-coded graph size — everything below (host
  // matrix, state arrays, device buffers, kernel launch) assumes the
  // parsed graph has at most n nodes.
  const int n = 4;
  // Open the txt file so we can parse it
  ifstream graph;
  char chemin[500];
  int max_iter;
  cout << "Entrer le chemin entier avec des doubles backslashs"<< endl;
  cin >> chemin;
  cout << "\nCombien d iterations maximale ?";
  cin >> max_iter;
  high_resolution_clock::time_point t1 = high_resolution_clock::now();
  time(&debut);
  graph.open(chemin);
  vector<int> from;
  vector<int> to;
  string word,line;
  // Parse the from/to columns around the separator
  if (!graph.is_open())
  {
    cout<<"Ouverture impossible";
    return false;
  }
  while (getline(graph,line))
  {
    string espace;
    std::istringstream text_stream(line);
    text_stream >> word;
    from.push_back(stoi(word));
    getline(text_stream,espace,' ');
    text_stream >> word;
    to.push_back(stoi(word));
  }
  graph.close();
  cout << "Ouverture du fichier texte reussi" << endl;
  // compute the maximum node id to size the square matrix
  double maxiFrom = *max_element(from.begin(), from.end());
  double maxiTo = *max_element(to.begin(), to.end());
  int maxval;
  if (maxiFrom < maxiTo){
    maxval = maxiTo+1;
  }
  else
  {
    maxval = maxiFrom+1;
  }
  // NOTE(review): only n*n = 16 floats are allocated, but the loops below
  // write maxval*maxval entries — heap overflow whenever the input graph
  // contains a node id > 3. The allocation should be sized by maxval.
  float* matrice = new float[n*n];
  for (int i = 0;i<maxval*maxval;i++)
  {
    matrice[i]=0;
  }
  int idx_i,idx_j;
  // NOTE(review): assumes the file holds exactly maxval*2 - 1 edges —
  // confirm against the expected input format.
  for (int i =0;i<maxval*2 -1;i++)
  {
    idx_i = from[i];
    idx_j = to[i];
    matrice[(idx_j)*maxval+idx_i]=1;
  }
  float sum;
  // Normalize the matrix to account for the number of linking pages
  for (int i = 0;i<maxval;i++)
  {
    sum = 0;
    for (int j =0;j<maxval;j++)
    {
      if (matrice[i*maxval+j]>0)
      {
        sum++;
      }
    }
    // dangling node: link it uniformly to every page
    if (sum ==0)
    {
      sum = maxval;
      for(int j = 0;j<maxval;j++)
      {
        matrice[i*maxval+j]=1;
      }
    }
    for(int j = 0;j<maxval;j++)
    {
      matrice[i*maxval+j]/=sum;
    }
  }
  // Start of the PageRank algorithm
  float maxval2 = maxval;
  float state[n];
  float old[n];
  float delta[n];
  //float temp[n];
  // damping: teleport probability spread over all pages
  float alpha =((1-d)/maxval);
  float* d_matrix;
  float* d_state;
  for (int i = 0;i<maxval;i++)
  {
    state[i] = 1/maxval2;
  }
  // fold damping factor d and teleport term into the matrix
  for (int i =0;i<maxval*maxval;i++)
  {
    matrice[i]=matrice[i]*d+alpha;
  }
  cudaMalloc((void**)&d_matrix,n*n*sizeof(float));
  cudaMalloc((void**)&d_state,n*sizeof(float));
  cudaMemcpy(d_matrix,matrice,n*n*sizeof(float),cudaMemcpyHostToDevice);
  cudaMemcpy(d_state,state,n*sizeof(float),cudaMemcpyHostToDevice);
  dim3 dimGrid(1,1);
  dim3 dimBlock(n,n);
  // power iteration: stop at max_iter or when ||state - old||^2 < epsilon
  for (int i = 0;i<=max_iter;i++)
  {
    float check = 0;
    for (int j = 0;j<maxval;j++)
    {
      old[j] = state[j];
    }
    mul<<<4,4>>>(d_matrix,d_state,n);
    cudaDeviceSynchronize();
    cudaMemcpy(state,d_state,n*sizeof(float),cudaMemcpyDeviceToHost);
    for (int h = 0;h<maxval;h++)
    {
      delta[h] = state[h] - old[h];
    }
    for (int h = 0;h<maxval;h++)
    {
      check+=delta[h]*delta[h];
    }
    if (check < epsilon)
    {
      cout << "On quitte la boucle apres "<< i<< " iterations.\n";
      break;
    }
  }
  // output: write the final rank vector, one value per line
  ofstream out;
  out.open("C:\\Users\\Prugniaud\\projects\\output.txt");
  for(int i =0;i<maxval;i++)
  {
    out << to_string(state[i]);
    out<< '\n';
  }
  out.close();
  high_resolution_clock::time_point t2 = high_resolution_clock::now();
  duration<double, std::milli> time_span = t2 - t1;
  time(&fin);
  cudaFree(d_matrix);
  cudaFree(d_state);
  cout << "Temps d execution : "<<difftime(fin,debut)<<" secondes"<<endl;
  cout << "Temps d execution : "<<time_span.count()<<" millisecondes"<<endl;
  cout << "Fin du programme, Appuyez sur Entrer pour quitter" << endl;
  getchar();
  return 0;
}
|
621437dd18b2e418120a5356cbc57af5ac774daa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelMagicUpsampleX.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations swept by the benchmark: {BLOCKX, BLOCKY}.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Matrix shapes swept by the benchmark: {XSIZE, YSIZE}.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for the first argv[1] matrix shapes, sweep all 20
// block configurations, launching kernelMagicUpsampleX 1000 times per
// configuration and printing "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
// Fixes over the original: allocations now use element count *
// sizeof(float) (the byte count was off by 4x), buffers are freed each
// iteration (they were leaked), a sync before/after the timed loop makes
// the measurement cover kernel execution rather than just launch
// overhead, and missing argv[1] no longer dereferences garbage.
int main(int argc, char **argv) {
    hipSetDevice(0);
    if (argc < 2) {
        cout << "usage: <matrix_len>" << endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *in = NULL;
            // bug fix: was XSIZE*YSIZE bytes — size in elements * sizeof(float)
            hipMalloc(&in, XSIZE * YSIZE * sizeof(float));
            int _w = 1;
            int _h = 1;
            float *out = NULL;
            hipMalloc(&out, XSIZE * YSIZE * sizeof(float));
            // round the launch area up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force lazy context creation before timing
            hipLaunchKernelGGL(kernelMagicUpsampleX, gridBlock, threadBlock, 0, 0, in, _w, _h, out);
            hipDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(kernelMagicUpsampleX, gridBlock, threadBlock, 0, 0, in, _w, _h, out);
            }
            hipDeviceSynchronize();  // drain warm-ups before starting the clock
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(kernelMagicUpsampleX, gridBlock, threadBlock, 0, 0, in, _w, _h, out);
            }
            hipDeviceSynchronize();  // launches are async: include execution time
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            hipFree(in);   // bug fix: buffers were leaked on every iteration
            hipFree(out);
        }
    }
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelMagicUpsampleX.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations swept by the benchmark: {BLOCKX, BLOCKY}.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Matrix shapes swept by the benchmark: {XSIZE, YSIZE}.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for the first argv[1] matrix shapes, sweep all 20
// block configurations, launching kernelMagicUpsampleX 1000 times per
// configuration and printing "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
// Fixes over the original: allocations now use element count *
// sizeof(float) (the byte count was off by 4x), buffers are freed each
// iteration (they were leaked), a sync before/after the timed loop makes
// the measurement cover kernel execution rather than just launch
// overhead, and missing argv[1] no longer dereferences garbage.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {
        cout << "usage: <matrix_len>" << endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *in = NULL;
            // bug fix: was XSIZE*YSIZE bytes — size in elements * sizeof(float)
            cudaMalloc(&in, XSIZE * YSIZE * sizeof(float));
            int _w = 1;
            int _h = 1;
            float *out = NULL;
            cudaMalloc(&out, XSIZE * YSIZE * sizeof(float));
            // round the launch area up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context creation before timing
            kernelMagicUpsampleX<<<gridBlock, threadBlock>>>(in, _w, _h, out);
            cudaDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                kernelMagicUpsampleX<<<gridBlock, threadBlock>>>(in, _w, _h, out);
            }
            cudaDeviceSynchronize();  // drain warm-ups before starting the clock
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                kernelMagicUpsampleX<<<gridBlock, threadBlock>>>(in, _w, _h, out);
            }
            cudaDeviceSynchronize();  // launches are async: include execution time
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            cudaFree(in);   // bug fix: buffers were leaked on every iteration
            cudaFree(out);
        }
    }
}
111785be534486e0dcb69086a0da0c571580e2ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//==================================================================================================
// Written in 2016 by Peter Shirley <ptrshrl@gmail.com>
//
// To the extent possible under law, the author(s) have dedicated all copyright and related and
// neighboring rights to this software to the public domain worldwide. This software is distributed
// without any warranty.
//
// You should have received a copy (see file COPYING.txt) of the CC0 Public Domain Dedication along
// with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
// This code was change following the tutorial of Roger Allen https://devblogs.nvidia.com/accelerated-ray-tracing-cuda/
//==================================================================================================
#include <iostream>
#include "sphere.h"
#include "hitable_list.h"
#include "float.h"
#include "vec3.h"
#include "ray.h"
#include <chrono>
// traca os raios de luz
// Shade a ray: hits are coloured by the surface normal (each component
// mapped from [-1,1] to [0,1]); misses fall through to a vertical
// white-to-blue sky gradient based on the ray direction's y component.
__device__ vec3 color(const ray& r, hitable **world) {
    hit_record rec;
    if ((*world)->hit(r, 0.0, FLT_MAX, rec)) {
        return 0.5f*vec3(rec.normal.x()+1.0f, rec.normal.y()+1.0f, rec.normal.z()+1.0f);
    }
    else {
        vec3 unit_direction = unit_vector(r.direction());
        // t in [0,1]: blend factor along the viewport height
        float t = 0.5f*(unit_direction.y() + 1.0f);
        return (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
    }
}
// hitable *random_scene() {
// int n = 500;
// hitable **list = new hitable*[n+1];
// list[0] = new sphere(vec3(0,-1000,0), 1000, new lambertian(vec3(0.5, 0.5, 0.5)));
// int i = 1;
// for (int a = -11; a < 11; a++) {
// for (int b = -11; b < 11; b++) {
// float choose_mat = drand48();
// vec3 center(a+0.9*drand48(),0.2,b+0.9*drand48());
// if ((center-vec3(4,0.2,0)).length() > 0.9) {
// if (choose_mat < 0.8) { // diffuse
// list[i++] = new sphere(center, 0.2, new lambertian(vec3(drand48()*drand48(), drand48()*drand48(), drand48()*drand48())));
// }
// else if (choose_mat < 0.95) { // metal
// list[i++] = new sphere(center, 0.2,
// new metal(vec3(0.5*(1 + drand48()), 0.5*(1 + drand48()), 0.5*(1 + drand48())), 0.5*drand48()));
// }
// else { // glass
// list[i++] = new sphere(center, 0.2, new dielectric(1.5));
// }
// }
// }
// }
// list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
// list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
// list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
// return new hitable_list(list,i);
// }
// pinta a imagem
// Render one pixel per thread. 2-D launch: thread (i, j) maps to pixel
// (i, j) of the max_x by max_y framebuffer; (u, v) are normalized
// viewport coordinates used to build the primary camera ray.
__global__ void rgb(vec3 *fb, int max_x, int max_y,vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin, hitable **world) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    // grid is rounded up past the image: out-of-range threads do nothing
    if((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j*max_x + i;
    float u = float(i) / float(max_x);
    float v = float(j) / float(max_y);
    ray r(origin, lower_left_corner + u*horizontal + v*vertical);
    fb[pixel_index] = color(r, world);
}
// instancia as esferas
// Builds the scene on the device: six spheres stored into d_list, then a
// hitable_list wrapping them in *d_world. Device-side `new` is required
// because the objects carry vtables that must be valid in device code.
// Launched <<<1,1>>>; the guard keeps construction single-threaded anyway.
__global__ void create_sphere(hitable **d_list, hitable **d_world) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        *(d_list)   = new sphere(vec3(0,0,-1), 0.5);
        *(d_list+1) = new sphere(vec3(0,-100.5,-1), 100);
        *(d_list+2) = new sphere(vec3(1, 0.2,-1), 0.2);
        *(d_list+3) = new sphere(vec3(0, 1,-1), 1);
        *(d_list+4) = new sphere(vec3(1, 1,-1), 0.5);
        *(d_list+5) = new sphere(vec3(0.4, 0.6,-1), 0.3);
        *d_world = new hitable_list(d_list,6);
    }
}
// deleta memorias
// Destroys the device-side scene built by create_sphere. Objects allocated
// with device `new` must be freed with device `delete`.
// BUG FIX: create_sphere allocates SIX spheres into d_list, but the original
// only deleted the first two, leaking the remaining four device objects.
__global__ void free_memory(hitable **d_list, hitable **d_world) {
    for (int i = 0; i < 6; i++) {
        delete *(d_list + i);
    }
    delete *d_world;
}
// Renders a 1200x800 image to stdout as plain PPM (P3); timing goes to stderr.
// NOTE(review): return codes of the HIP API calls are not checked here.
int main() {
    int nx = 1200;
    int ny = 800;
    // int ns = 10;
    int tx = 8;// threads per block in x (image is tiled in tx-wide strips)
    int ty = 8;// threads per block in y
    int num_pixels = nx*ny;
    size_t fb_size = num_pixels*sizeof(vec3);
    using namespace std::chrono;
    high_resolution_clock::time_point begin = high_resolution_clock::now();
    // allocate FB (managed memory: written by the GPU, read back by the CPU)
    vec3 *fb;
    hipMallocManaged((void **)&fb, fb_size);
    hitable **d_list;
    hipMalloc((void **)&d_list, 6*sizeof(hitable *));
    hitable **d_world;
    hipMalloc((void **)&d_world, sizeof(hitable *));
    hipLaunchKernelGGL(( create_sphere), dim3(1),dim3(1), 0, 0, d_list,d_world);
    hipDeviceSynchronize();
    // NOTE(review): the names are swapped - block_size actually holds the GRID
    // dimensions (number of blocks) and size_grid holds the block dimensions.
    dim3 block_size(nx/tx+1,ny/ty+1);// grid: one block per tx*ty tile, rounded up
    dim3 size_grid(tx,ty);// threads per block
    hipLaunchKernelGGL(( rgb), dim3(block_size), dim3(size_grid), 0, 0, fb, nx, ny, vec3(-2.0, -1.0, -1.0), vec3(4.0, 0.0, 0.0), vec3(0.0, 2.0, 0.0), vec3(0.0, 0.0, 0.0),d_world);// launch the render on the GPU
    hipDeviceSynchronize();
    std::cout << "P3\n" << nx << " " << ny << "\n255\n";
    // hitable *list[5];
    // Emit pixels top-to-bottom (PPM order); fb row 0 is the image bottom.
    for (int j = ny-1; j >= 0; j--) {
        for (int i = 0; i < nx; i++) {
            size_t pixel_index = j*nx + i;
            // size_t pixel_index = j*3*nx + i*3;
            int ir = int(255.99*fb[pixel_index][0]);
            int ig = int(255.99*fb[pixel_index][1]);
            int ib = int(255.99*fb[pixel_index][2]);
            std::cout << ir << " " << ig << " " << ib << "\n";
        }
    }
    high_resolution_clock::time_point end = high_resolution_clock::now();
    duration<double> time_span = duration_cast<duration<double>>(end - begin);
    std::cerr << "Tempo: " << time_span.count();
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( free_memory), dim3(1),dim3(1), 0, 0, d_list,d_world);
    hipFree(d_list);
    hipFree(d_world);
    hipFree(fb);
    hipDeviceReset();
} | 111785be534486e0dcb69086a0da0c571580e2ef.cu | //==================================================================================================
// Written in 2016 by Peter Shirley <ptrshrl@gmail.com>
//
// To the extent possible under law, the author(s) have dedicated all copyright and related and
// neighboring rights to this software to the public domain worldwide. This software is distributed
// without any warranty.
//
// You should have received a copy (see file COPYING.txt) of the CC0 Public Domain Dedication along
// with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
// This code was change following the tutorial of Roger Allen https://devblogs.nvidia.com/accelerated-ray-tracing-cuda/
//==================================================================================================
#include <iostream>
#include "sphere.h"
#include "hitable_list.h"
#include "float.h"
#include "vec3.h"
#include "ray.h"
#include <chrono>
// traca os raios de luz
// Shades a single ray: a hit surface is visualized by its unit normal, a miss
// falls back to a vertical white-to-blue sky gradient.
__device__ vec3 color(const ray& r, hitable **world) {
    hit_record rec;
    if ((*world)->hit(r, 0.0, FLT_MAX, rec)) {
        // Map each normal component from [-1,1] into [0,1] as an RGB value.
        return 0.5f*vec3(rec.normal.x()+1.0f, rec.normal.y()+1.0f, rec.normal.z()+1.0f);
    }
    else {
        // Miss: blend white and light blue by the ray's vertical angle.
        vec3 unit_direction = unit_vector(r.direction());
        float t = 0.5f*(unit_direction.y() + 1.0f);
        return (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
    }
}
// hitable *random_scene() {
// int n = 500;
// hitable **list = new hitable*[n+1];
// list[0] = new sphere(vec3(0,-1000,0), 1000, new lambertian(vec3(0.5, 0.5, 0.5)));
// int i = 1;
// for (int a = -11; a < 11; a++) {
// for (int b = -11; b < 11; b++) {
// float choose_mat = drand48();
// vec3 center(a+0.9*drand48(),0.2,b+0.9*drand48());
// if ((center-vec3(4,0.2,0)).length() > 0.9) {
// if (choose_mat < 0.8) { // diffuse
// list[i++] = new sphere(center, 0.2, new lambertian(vec3(drand48()*drand48(), drand48()*drand48(), drand48()*drand48())));
// }
// else if (choose_mat < 0.95) { // metal
// list[i++] = new sphere(center, 0.2,
// new metal(vec3(0.5*(1 + drand48()), 0.5*(1 + drand48()), 0.5*(1 + drand48())), 0.5*drand48()));
// }
// else { // glass
// list[i++] = new sphere(center, 0.2, new dielectric(1.5));
// }
// }
// }
// }
// list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
// list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
// list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
// return new hitable_list(list,i);
// }
// pinta a imagem
// One thread per pixel: builds the camera ray for pixel (i, j) and stores the
// shaded color into the framebuffer fb, indexed row-major as j*max_x + i.
__global__ void rgb(vec3 *fb, int max_x, int max_y,vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin, hitable **world) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    // Bounds guard: the grid is rounded up, so edge blocks have extra threads.
    if((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j*max_x + i;
    // (u, v) are the pixel's normalized screen coordinates in [0, 1).
    float u = float(i) / float(max_x);
    float v = float(j) / float(max_y);
    ray r(origin, lower_left_corner + u*horizontal + v*vertical);
    fb[pixel_index] = color(r, world);
}
// instancia as esferas
// Builds the scene on the device: six spheres stored into d_list, then a
// hitable_list wrapping them in *d_world. Device-side `new` is required
// because the objects carry vtables that must be valid in device code.
// Launched <<<1,1>>>; the guard keeps construction single-threaded anyway.
__global__ void create_sphere(hitable **d_list, hitable **d_world) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        *(d_list)   = new sphere(vec3(0,0,-1), 0.5);
        *(d_list+1) = new sphere(vec3(0,-100.5,-1), 100);
        *(d_list+2) = new sphere(vec3(1, 0.2,-1), 0.2);
        *(d_list+3) = new sphere(vec3(0, 1,-1), 1);
        *(d_list+4) = new sphere(vec3(1, 1,-1), 0.5);
        *(d_list+5) = new sphere(vec3(0.4, 0.6,-1), 0.3);
        *d_world = new hitable_list(d_list,6);
    }
}
// deleta memorias
// Destroys the device-side scene built by create_sphere. Objects allocated
// with device `new` must be freed with device `delete`.
// BUG FIX: create_sphere allocates SIX spheres into d_list, but the original
// only deleted the first two, leaking the remaining four device objects.
__global__ void free_memory(hitable **d_list, hitable **d_world) {
    for (int i = 0; i < 6; i++) {
        delete *(d_list + i);
    }
    delete *d_world;
}
// Renders a 1200x800 image to stdout as plain PPM (P3); timing goes to stderr.
// NOTE(review): return codes of the CUDA API calls are not checked here.
int main() {
    int nx = 1200;
    int ny = 800;
    // int ns = 10;
    int tx = 8;// threads per block in x (image is tiled in tx-wide strips)
    int ty = 8;// threads per block in y
    int num_pixels = nx*ny;
    size_t fb_size = num_pixels*sizeof(vec3);
    using namespace std::chrono;
    high_resolution_clock::time_point begin = high_resolution_clock::now();
    // allocate FB (managed memory: written by the GPU, read back by the CPU)
    vec3 *fb;
    cudaMallocManaged((void **)&fb, fb_size);
    hitable **d_list;
    cudaMalloc((void **)&d_list, 6*sizeof(hitable *));
    hitable **d_world;
    cudaMalloc((void **)&d_world, sizeof(hitable *));
    create_sphere<<<1,1>>>(d_list,d_world);
    cudaDeviceSynchronize();
    // NOTE(review): the names are swapped - block_size actually holds the GRID
    // dimensions (number of blocks) and size_grid holds the block dimensions.
    dim3 block_size(nx/tx+1,ny/ty+1);// grid: one block per tx*ty tile, rounded up
    dim3 size_grid(tx,ty);// threads per block
    rgb<<<block_size, size_grid>>>(fb, nx, ny, vec3(-2.0, -1.0, -1.0), vec3(4.0, 0.0, 0.0), vec3(0.0, 2.0, 0.0), vec3(0.0, 0.0, 0.0),d_world);// launch the render on the GPU
    cudaDeviceSynchronize();
    std::cout << "P3\n" << nx << " " << ny << "\n255\n";
    // hitable *list[5];
    // Emit pixels top-to-bottom (PPM order); fb row 0 is the image bottom.
    for (int j = ny-1; j >= 0; j--) {
        for (int i = 0; i < nx; i++) {
            size_t pixel_index = j*nx + i;
            // size_t pixel_index = j*3*nx + i*3;
            int ir = int(255.99*fb[pixel_index][0]);
            int ig = int(255.99*fb[pixel_index][1]);
            int ib = int(255.99*fb[pixel_index][2]);
            std::cout << ir << " " << ig << " " << ib << "\n";
        }
    }
    high_resolution_clock::time_point end = high_resolution_clock::now();
    duration<double> time_span = duration_cast<duration<double>>(end - begin);
    std::cerr << "Tempo: " << time_span.count();
    cudaDeviceSynchronize();
    free_memory<<<1,1>>>(d_list,d_world);
    cudaFree(d_list);
    cudaFree(d_world);
    cudaFree(fb);
    cudaDeviceReset();
} |
695c2db0573ba798b421c75acf8ee1eb7554327c.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
// Forward pass of the Silence layer: the layer exists only to consume its
// bottom blobs (suppressing "unused output" plumbing), so there is no output
// to compute.
template<typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                      const vector<Blob<Dtype>*>& top) {
  // Do nothing.
}
// Backward pass: a silenced blob contributes no gradient, so the diff of every
// bottom blob that requests backpropagation is set to zero, on either the
// CUDA/ROCm backend or the GreenTea (OpenCL/ViennaCL) backend.
template<typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                       const vector<bool>& propagate_down,
                                       const vector<Blob<Dtype>*>& bottom) {
  for (int_tp i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
        caffe_gpu_set(bottom[i]->count(), Dtype(0),
                      bottom[i]->mutable_gpu_diff());
#endif  // USE_ROCM
      } else {
#ifdef USE_GREENTEA
        // OpenCL path: enqueue the "gpu_set" kernel to zero-fill the diff
        // buffer, then block until the queue drains.
        viennacl::ocl::context &ctx = viennacl::ocl::get_context(
            this->device_->id());
        viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
            this->device_->id());
        viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel(
            CL_KERNEL_SELECT("gpu_set"));
        viennacl::ocl::enqueue(
            oclk_gpu_set(
                bottom[i]->count(), Dtype(0),
                WrapHandle((cl_mem) bottom[i]->mutable_gpu_diff(), &ctx)),
            ctx.get_queue());
        ctx.get_queue().finish();
#endif
      }
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);
} // namespace caffe
| 695c2db0573ba798b421c75acf8ee1eb7554327c.cu | #include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
// Forward pass of the Silence layer: the layer exists only to consume its
// bottom blobs (suppressing "unused output" plumbing), so there is no output
// to compute.
template<typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                      const vector<Blob<Dtype>*>& top) {
  // Do nothing.
}
// Backward pass: a silenced blob contributes no gradient, so the diff of every
// bottom blob that requests backpropagation is set to zero, on either the
// CUDA backend or the GreenTea (OpenCL/ViennaCL) backend.
template<typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                       const vector<bool>& propagate_down,
                                       const vector<Blob<Dtype>*>& bottom) {
  for (int_tp i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
        caffe_gpu_set(bottom[i]->count(), Dtype(0),
                      bottom[i]->mutable_gpu_diff());
#endif  // USE_CUDA
      } else {
#ifdef USE_GREENTEA
        // OpenCL path: enqueue the "gpu_set" kernel to zero-fill the diff
        // buffer, then block until the queue drains.
        viennacl::ocl::context &ctx = viennacl::ocl::get_context(
            this->device_->id());
        viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
            this->device_->id());
        viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel(
            CL_KERNEL_SELECT("gpu_set"));
        viennacl::ocl::enqueue(
            oclk_gpu_set(
                bottom[i]->count(), Dtype(0),
                WrapHandle((cl_mem) bottom[i]->mutable_gpu_diff(), &ctx)),
            ctx.get_queue());
        ctx.get_queue().finish();
#endif
      }
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);
} // namespace caffe
|
7ea1e7f3bb50e2541546ac87e73e2d6e9e6eccc9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// modified from
// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
// modified from
// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp
#include "deform_conv2d.h"
namespace nvinfer1 {
namespace plugin {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = (blockIdx.x * blockDim.x) + threadIdx.x; i < (n); i += (blockDim.x * gridDim.x))
template <typename integer>
constexpr __host__ __device__ inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
/******************* add_bias_kernelLauncher ***********************/
// Adds bias[blockIdx.x] to the n contiguous elements x[blockIdx.x*n ..
// blockIdx.x*n + n - 1]. The launcher maps one block per channel of an
// x laid out as [channel, batch, H, W] with n = batch*H*W.
template <typename T>
__global__ void add_bias(T* x, const T* bias, int n) {
  const int bid = blockIdx.x;
  // Read-only cache load; the whole block uses the same bias value.
  auto b = __ldg(&bias[bid]);
  for (int tid = threadIdx.x; tid < n; tid += blockDim.x)
    x[bid * n + tid] += b;
}
// [channel, batch, H, W] x + [channel] bias
// Adds the per-channel bias to x, where x is laid out [channel, batch, H, W]:
// one block per channel; threads in the block stride over that channel's
// n = batch*H*W elements.
// BUG FIX: the old half2 "fast path" reinterpreted `bias` (one half per
// channel) as half2 and indexed it with the channel id, so channel c picked up
// the pair (bias[2c], bias[2c+1]) - a wrong value, and an out-of-bounds read
// for c >= channel/2. A correct vectorized path must broadcast bias[c] into
// both half2 lanes (e.g. via __half2half2) in a dedicated kernel; until such a
// kernel exists, always use the scalar kernel for every T.
template <typename T>
void add_bias_kernelLauncher(T* x, const T* bias, int channel, int batch, int H, int W, hipStream_t stream) {
  dim3 grid(channel);
  int n = W * H * batch;
  int blockSize = n;
  if (blockSize > 1024)
    blockSize = 1024;
  hipLaunchKernelGGL(( add_bias), dim3(grid), dim3(blockSize), 0, stream, x, bias, n);
}
// Bilinear interpolation of `in` (a height x width, row-major image plane) at
// the fractional position (h, w). Samples outside [-1, height) x [-1, width)
// return 0; samples that straddle the border treat the missing corners as 0,
// which fades values out smoothly at the edges.
template <typename T>
__device__ T bilinear_interpolate(const T* in, int height, int width, T h, T w) {
  if (h <= T(-1) || T(height) <= h || w <= T(-1) || T(width) <= w) {
    return T(0);
  }
  // The four integer corners surrounding (h, w).
  int h_low = floor((float)h);
  int w_low = floor((float)w);
  int h_high = h_low + 1;
  int w_high = w_low + 1;
  // Fractional distances to the low corner; (hh, hw) are their complements.
  T lh = h - T(h_low);
  T lw = w - T(w_low);
  T hh = T(1) - lh, hw = T(1) - lw;
  // Corner values, zero when a corner falls outside the image.
  T v1 = 0;
  if (h_low >= 0 && w_low >= 0)
    v1 = __ldg(&in[h_low * width + w_low]);
  T v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
    v2 = __ldg(&in[h_low * width + w_high]);
  T v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
    v3 = __ldg(&in[h_high * width + w_low]);
  T v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
    v4 = __ldg(&in[h_high * width + w_high]);
  // Standard bilinear corner weights.
  T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
// Deformable im2col: builds the column matrix consumed by the GEMM. Each of
// the n = n_in_channels * batch_sz * out_h * out_w work items fills one
// (in_c, out_b, out_y, out_x) cell across all weight_h*weight_w kernel taps.
// For every tap the sampling position is the regular convolution location
// shifted by a learned (offset_h, offset_w), sampled bilinearly and optionally
// scaled by a learned modulation mask. The resulting columns are laid out
// [in_c * weight_h * weight_w, batch_sz * out_h * out_w], row-major.
template <typename T>
__global__ void deformable_im2col_kernel(
    int n,
    const T* input_ptr,
    const T* offset_ptr,
    const T* mask_ptr,
    int height,
    int width,
    int weight_h,
    int weight_w,
    int pad_h,
    int pad_w,
    int stride_h,
    int stride_w,
    int dilation_h,
    int dilation_w,
    int batch_sz,
    int n_in_channels,
    int n_offset_grps,
    int out_h,
    int out_w,
    bool use_mask,
    T* columns_ptr) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (in_c, out_b, out_y, out_x).
    const int out_x = index % out_w;
    const int out_y = (index / out_w) % out_h;
    const int out_b = (index / (out_w * out_h)) % batch_sz;
    const int in_c = index / (out_w * out_h * batch_sz);
    const int out_c = in_c * weight_h * weight_w;
    // Each offset group shares one set of offsets/masks across its channels.
    int c_per_offset_grp = n_in_channels / n_offset_grps;
    const int grp_idx = in_c / c_per_offset_grp;
    // Advance the base pointers to this work item's row/plane.
    columns_ptr += (out_c * (batch_sz * out_h * out_w) + out_b * (out_h * out_w) + out_y * out_w + out_x);
    input_ptr += (out_b * (n_in_channels * height * width) + in_c * (height * width));
    offset_ptr += (out_b * n_offset_grps + grp_idx) * 2 * weight_h * weight_w * out_h * out_w;
    if (use_mask) {
      mask_ptr += (out_b * n_offset_grps + grp_idx) * weight_h * weight_w * out_h * out_w;
    }
    for (int i = 0; i < weight_h; ++i) {
      for (int j = 0; j < weight_w; ++j) {
        const int mask_idx = i * weight_w + j;
        // Offsets come interleaved per tap: (dh, dw).
        const int offset_idx = 2 * mask_idx;
        T mask_value = 1;
        if (use_mask) {
          mask_value = __ldg(&mask_ptr[mask_idx * (out_h * out_w) + out_y * out_w + out_x]);
        }
        const T offset_h = __ldg(&offset_ptr[offset_idx * (out_h * out_w) + out_y * out_w + out_x]);
        const T offset_w = __ldg(&offset_ptr[(offset_idx + 1) * (out_h * out_w) + out_y * out_w + out_x]);
        // Regular conv sampling position plus the learned offset.
        const T y = T(out_y * stride_h - pad_h) + T(i * dilation_h) + offset_h;
        const T x = T(out_x * stride_w - pad_w) + T(j * dilation_w) + offset_w;
        *columns_ptr = mask_value * bilinear_interpolate(input_ptr, height, width, y, x);
        // Step to the next tap's row of the column matrix.
        columns_ptr += batch_sz * out_h * out_w;
      }
    }
  }
}
// input, weight, output are row-major
// Thin typed wrapper over hipblasGemmEx for T in {float, half}: computes
// C = scale * op(A) * op(B) with beta fixed to 0 (C is overwritten).
// hipBLAS is column-major; the callers in this file pass swapped dimensions
// and leading strides to obtain a row-major product, per the comment above.
// NOTE(review): the hipblasGemmEx status is discarded - failures only surface
// through hipGetLastError() at the call sites.
template <typename T>
void gemm(
    T* C,
    const T* A,
    const T* B,
    const int m,
    const int n,
    const int k,
    const int lda,
    const int ldb,
    const int ldc,
    hipblasOperation_t trans_a,
    hipblasOperation_t trans_b,
    hipblasHandle_t cublas_handle,
    float scale = 1.0f) {
  hipDataType Atype, Btype, Ctype, computeType;
  float alpha_float = scale;
  float beta_float = 0.0f;
  half alpha_half = half(scale);
  half beta_half = half(0.0f);
  void *alpha, *beta;
  int cublasAlgo;
  if (std::is_same<T, float>::value) {
    computeType = HIP_R_32F;
    Atype = HIP_R_32F;
    Btype = HIP_R_32F;
    Ctype = HIP_R_32F;
    alpha = &alpha_float;
    beta = &beta_float;
    cublasAlgo = HIPBLAS_GEMM_DEFAULT;
  } else {
    computeType = HIP_R_16F;
    Atype = HIP_R_16F;
    Btype = HIP_R_16F;
    Ctype = HIP_R_16F;
    alpha = &alpha_half;
    beta = &beta_half;
    // BUG FIX: hipify left the CUDA constant CUBLAS_GEMM_DEFAULT_TENSOR_OP
    // here, which is not a hipblasGemmAlgo_t value. hipBLAS only exposes
    // HIPBLAS_GEMM_DEFAULT and picks the best algorithm internally.
    cublasAlgo = HIPBLAS_GEMM_DEFAULT;
  }
  hipblasGemmEx(
      cublas_handle,
      trans_a,
      trans_b,
      m,
      n,
      k,
      alpha,
      A,
      Atype,
      lda,
      B,
      Btype,
      ldb,
      beta,
      C,
      Ctype,
      ldc,
      computeType,
      static_cast<hipblasGemmAlgo_t>(cublasAlgo));
}
// Runs deformable convolution: im2col with learned offsets/masks into the
// scratch buffer mColumnDev, a GEMM against the weights, then a per-channel
// bias add. The GEMM produces `output` in [out_c, bs, out_h, out_w] layout.
int DeformConv2D::enqueue(
    const nvinfer1::PluginTensorDesc* inputDesc,
    const nvinfer1::PluginTensorDesc* outputDesc,
    const void* const* inputs,
    void* const* outputs,
    void* workspace,
    hipStream_t stream) {
  auto input_shape = inputDesc[0].dims;
  auto bs = input_shape.d[0];
  auto in_c = input_shape.d[1];
  int in_h = input_shape.d[2];
  int in_w = input_shape.d[3];
  auto output_shape = outputDesc[0].dims;
  int out_c = output_shape.d[1];
  int out_h = output_shape.d[2];
  int out_w = output_shape.d[3];
  auto input = inputs[0];
  auto offset = inputs[1];
  auto mask = inputs[2];
  auto output = outputs[0];
  // One im2col work item per (in_c, batch, out_y, out_x).
  int num_kernels = in_c * bs * out_h * out_w;
  const unsigned int threads = 512;
  const unsigned int blocks = (num_kernels + threads - 1) / threads;
  if (!mColumnDev) {
    // Lazily allocated scratch for the column matrix.
    // NOTE(review): sized from the FIRST call's batch size; confirm later
    // calls never use a larger bs, or this buffer is overrun.
    auto size = in_c * kernel_h_ * kernel_w_ * bs * out_h * out_w;
    gLogVerbose << "initialize enqueue mColumnDev count: " << size << std::endl;
    CUASSERT(hipMalloc(&mColumnDev, size * mParamWordsize))
    CUASSERT(hipMemset(mColumnDev, 0, size * mParamWordsize))
  }
  if (inputDesc[0].type == nvinfer1::DataType::kHALF) {
    hipLaunchKernelGGL(( deformable_im2col_kernel), dim3(blocks), dim3(threads), 0, stream,
        num_kernels,
        (const half*)input,
        (const half*)offset,
        (const half*)mask,
        in_h,
        in_w,
        kernel_h_,
        kernel_w_,
        pad_h_,
        pad_w_,
        stride_h_,
        stride_w_,
        dilation_h_,
        dilation_w_,
        bs,
        in_c_,
        offset_groups_,
        out_h,
        out_w,
        use_mask_,
        (half*)mColumnDev);
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
      printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
    }
    // Row-major product: output[out_c x n] = weight[out_c x k] * columns[k x n].
    int m = out_c;
    int n = bs * out_h * out_w;
    int k = in_c * kernel_h_ * kernel_w_;
    gemm(
        (half*)output, (half*)mColumnDev, (half*)mWeightDev.get(), n, m, k, n, k, n, HIPBLAS_OP_N, HIPBLAS_OP_N, mCublas);
    hipError_t gemm_err = hipGetLastError();
    if (gemm_err != hipSuccess) {
      printf("error in hipblasSgemm: %s\n", hipGetErrorString(gemm_err));
    }
    // BUG FIX: the bias was previously added to the scratch column buffer
    // (mColumnDev) and then discarded; add it to the GEMM result instead.
    // output [out_c, bs, out_h, out_w]
    add_bias_kernelLauncher((half*)output, (const half*)mBiasDev.get(), out_c, bs, out_h, out_w, stream);
    hipError_t bias_err = hipGetLastError();
    if (bias_err != hipSuccess) {
      printf("error in add_bias_kernelLauncher: %s\n", hipGetErrorString(bias_err));
    }
  } else {
    // float: identical pipeline at fp32.
    hipLaunchKernelGGL(( deformable_im2col_kernel), dim3(blocks), dim3(threads), 0, stream,
        num_kernels,
        (const float*)input,
        (const float*)offset,
        (const float*)mask,
        in_h,
        in_w,
        kernel_h_,
        kernel_w_,
        pad_h_,
        pad_w_,
        stride_h_,
        stride_w_,
        dilation_h_,
        dilation_w_,
        bs,
        in_c_,
        offset_groups_,
        out_h,
        out_w,
        use_mask_,
        (float*)mColumnDev);
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
      printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
    }
    int m = out_c;
    int n = bs * out_h * out_w;
    int k = in_c * kernel_h_ * kernel_w_;
    gemm(
        (float*)output,
        (float*)mColumnDev,
        (float*)mWeightDev.get(),
        n,
        m,
        k,
        n,
        k,
        n,
        HIPBLAS_OP_N,
        HIPBLAS_OP_N,
        mCublas);
    hipError_t gemm_err = hipGetLastError();
    if (gemm_err != hipSuccess) {
      printf("error in hipblasSgemm: %s\n", hipGetErrorString(gemm_err));
    }
    // BUG FIX: add the bias to the GEMM output, not to mColumnDev.
    // output [out_c, bs, out_h, out_w]
    add_bias_kernelLauncher((float*)output, (const float*)mBiasDev.get(), out_c, bs, out_h, out_w, stream);
    hipError_t bias_err = hipGetLastError();
    if (bias_err != hipSuccess) {
      printf("error in add_bias_kernelLauncher: %s\n", hipGetErrorString(bias_err));
    }
  }
  return 0;
}
} // namespace plugin
} // namespace nvinfer1
| 7ea1e7f3bb50e2541546ac87e73e2d6e9e6eccc9.cu | // modified from
// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
// modified from
// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp
#include "deform_conv2d.h"
namespace nvinfer1 {
namespace plugin {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = (blockIdx.x * blockDim.x) + threadIdx.x; i < (n); i += (blockDim.x * gridDim.x))
template <typename integer>
constexpr __host__ __device__ inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
/******************* add_bias_kernelLauncher ***********************/
// Adds bias[blockIdx.x] to the n contiguous elements x[blockIdx.x*n ..
// blockIdx.x*n + n - 1]. The launcher maps one block per channel of an
// x laid out as [channel, batch, H, W] with n = batch*H*W.
template <typename T>
__global__ void add_bias(T* x, const T* bias, int n) {
  const int bid = blockIdx.x;
  // Read-only cache load; the whole block uses the same bias value.
  auto b = __ldg(&bias[bid]);
  for (int tid = threadIdx.x; tid < n; tid += blockDim.x)
    x[bid * n + tid] += b;
}
// [channel, batch, H, W] x + [channel] bias
// Adds the per-channel bias to x, where x is laid out [channel, batch, H, W]:
// one block per channel; threads in the block stride over that channel's
// n = batch*H*W elements.
// BUG FIX: the old half2 "fast path" reinterpreted `bias` (one half per
// channel) as half2 and indexed it with the channel id, so channel c picked up
// the pair (bias[2c], bias[2c+1]) - a wrong value, and an out-of-bounds read
// for c >= channel/2. A correct vectorized path must broadcast bias[c] into
// both half2 lanes (e.g. via __half2half2) in a dedicated kernel; until such a
// kernel exists, always use the scalar kernel for every T.
template <typename T>
void add_bias_kernelLauncher(T* x, const T* bias, int channel, int batch, int H, int W, cudaStream_t stream) {
  dim3 grid(channel);
  int n = W * H * batch;
  int blockSize = n;
  if (blockSize > 1024)
    blockSize = 1024;
  add_bias<<<grid, blockSize, 0, stream>>>(x, bias, n);
}
// Bilinear interpolation of `in` (a height x width, row-major image plane) at
// the fractional position (h, w). Samples outside [-1, height) x [-1, width)
// return 0; samples that straddle the border treat the missing corners as 0,
// which fades values out smoothly at the edges.
template <typename T>
__device__ T bilinear_interpolate(const T* in, int height, int width, T h, T w) {
  if (h <= T(-1) || T(height) <= h || w <= T(-1) || T(width) <= w) {
    return T(0);
  }
  // The four integer corners surrounding (h, w).
  int h_low = floor((float)h);
  int w_low = floor((float)w);
  int h_high = h_low + 1;
  int w_high = w_low + 1;
  // Fractional distances to the low corner; (hh, hw) are their complements.
  T lh = h - T(h_low);
  T lw = w - T(w_low);
  T hh = T(1) - lh, hw = T(1) - lw;
  // Corner values, zero when a corner falls outside the image.
  T v1 = 0;
  if (h_low >= 0 && w_low >= 0)
    v1 = __ldg(&in[h_low * width + w_low]);
  T v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
    v2 = __ldg(&in[h_low * width + w_high]);
  T v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
    v3 = __ldg(&in[h_high * width + w_low]);
  T v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
    v4 = __ldg(&in[h_high * width + w_high]);
  // Standard bilinear corner weights.
  T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
// Deformable im2col: builds the column matrix consumed by the GEMM. Each of
// the n = n_in_channels * batch_sz * out_h * out_w work items fills one
// (in_c, out_b, out_y, out_x) cell across all weight_h*weight_w kernel taps.
// For every tap the sampling position is the regular convolution location
// shifted by a learned (offset_h, offset_w), sampled bilinearly and optionally
// scaled by a learned modulation mask. The resulting columns are laid out
// [in_c * weight_h * weight_w, batch_sz * out_h * out_w], row-major.
template <typename T>
__global__ void deformable_im2col_kernel(
    int n,
    const T* input_ptr,
    const T* offset_ptr,
    const T* mask_ptr,
    int height,
    int width,
    int weight_h,
    int weight_w,
    int pad_h,
    int pad_w,
    int stride_h,
    int stride_w,
    int dilation_h,
    int dilation_w,
    int batch_sz,
    int n_in_channels,
    int n_offset_grps,
    int out_h,
    int out_w,
    bool use_mask,
    T* columns_ptr) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (in_c, out_b, out_y, out_x).
    const int out_x = index % out_w;
    const int out_y = (index / out_w) % out_h;
    const int out_b = (index / (out_w * out_h)) % batch_sz;
    const int in_c = index / (out_w * out_h * batch_sz);
    const int out_c = in_c * weight_h * weight_w;
    // Each offset group shares one set of offsets/masks across its channels.
    int c_per_offset_grp = n_in_channels / n_offset_grps;
    const int grp_idx = in_c / c_per_offset_grp;
    // Advance the base pointers to this work item's row/plane.
    columns_ptr += (out_c * (batch_sz * out_h * out_w) + out_b * (out_h * out_w) + out_y * out_w + out_x);
    input_ptr += (out_b * (n_in_channels * height * width) + in_c * (height * width));
    offset_ptr += (out_b * n_offset_grps + grp_idx) * 2 * weight_h * weight_w * out_h * out_w;
    if (use_mask) {
      mask_ptr += (out_b * n_offset_grps + grp_idx) * weight_h * weight_w * out_h * out_w;
    }
    for (int i = 0; i < weight_h; ++i) {
      for (int j = 0; j < weight_w; ++j) {
        const int mask_idx = i * weight_w + j;
        // Offsets come interleaved per tap: (dh, dw).
        const int offset_idx = 2 * mask_idx;
        T mask_value = 1;
        if (use_mask) {
          mask_value = __ldg(&mask_ptr[mask_idx * (out_h * out_w) + out_y * out_w + out_x]);
        }
        const T offset_h = __ldg(&offset_ptr[offset_idx * (out_h * out_w) + out_y * out_w + out_x]);
        const T offset_w = __ldg(&offset_ptr[(offset_idx + 1) * (out_h * out_w) + out_y * out_w + out_x]);
        // Regular conv sampling position plus the learned offset.
        const T y = T(out_y * stride_h - pad_h) + T(i * dilation_h) + offset_h;
        const T x = T(out_x * stride_w - pad_w) + T(j * dilation_w) + offset_w;
        *columns_ptr = mask_value * bilinear_interpolate(input_ptr, height, width, y, x);
        // Step to the next tap's row of the column matrix.
        columns_ptr += batch_sz * out_h * out_w;
      }
    }
  }
}
// input, weight, output are row-major
// Thin typed wrapper over cublasGemmEx for T in {float, half}: computes
// C = scale * op(A) * op(B) with beta fixed to 0 (C is overwritten).
// cuBLAS is column-major; the callers in this file pass swapped dimensions
// and leading strides to obtain a row-major product, per the comment above.
// The half path requests tensor-op acceleration.
// NOTE(review): the cublasGemmEx status is discarded - failures only surface
// through cudaGetLastError() at the call sites.
template <typename T>
void gemm(
    T* C,
    const T* A,
    const T* B,
    const int m,
    const int n,
    const int k,
    const int lda,
    const int ldb,
    const int ldc,
    cublasOperation_t trans_a,
    cublasOperation_t trans_b,
    cublasHandle_t cublas_handle,
    float scale = 1.0f) {
  cudaDataType_t Atype, Btype, Ctype, computeType;
  float alpha_float = scale;
  float beta_float = 0.0f;
  half alpha_half = half(scale);
  half beta_half = half(0.0f);
  void *alpha, *beta;
  int cublasAlgo;
  if (std::is_same<T, float>::value) {
    computeType = CUDA_R_32F;
    Atype = CUDA_R_32F;
    Btype = CUDA_R_32F;
    Ctype = CUDA_R_32F;
    alpha = &alpha_float;
    beta = &beta_float;
    cublasAlgo = CUBLAS_GEMM_DEFAULT;
  } else {
    computeType = CUDA_R_16F;
    Atype = CUDA_R_16F;
    Btype = CUDA_R_16F;
    Ctype = CUDA_R_16F;
    alpha = &alpha_half;
    beta = &beta_half;
    cublasAlgo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
  }
  cublasGemmEx(
      cublas_handle,
      trans_a,
      trans_b,
      m,
      n,
      k,
      alpha,
      A,
      Atype,
      lda,
      B,
      Btype,
      ldb,
      beta,
      C,
      Ctype,
      ldc,
      computeType,
      static_cast<cublasGemmAlgo_t>(cublasAlgo));
}
// Runs deformable convolution: im2col with learned offsets/masks into the
// scratch buffer mColumnDev, a GEMM against the weights, then a per-channel
// bias add. The GEMM produces `output` in [out_c, bs, out_h, out_w] layout.
int DeformConv2D::enqueue(
    const nvinfer1::PluginTensorDesc* inputDesc,
    const nvinfer1::PluginTensorDesc* outputDesc,
    const void* const* inputs,
    void* const* outputs,
    void* workspace,
    cudaStream_t stream) {
  auto input_shape = inputDesc[0].dims;
  auto bs = input_shape.d[0];
  auto in_c = input_shape.d[1];
  int in_h = input_shape.d[2];
  int in_w = input_shape.d[3];
  auto output_shape = outputDesc[0].dims;
  int out_c = output_shape.d[1];
  int out_h = output_shape.d[2];
  int out_w = output_shape.d[3];
  auto input = inputs[0];
  auto offset = inputs[1];
  auto mask = inputs[2];
  auto output = outputs[0];
  // One im2col work item per (in_c, batch, out_y, out_x).
  int num_kernels = in_c * bs * out_h * out_w;
  const unsigned int threads = 512;
  const unsigned int blocks = (num_kernels + threads - 1) / threads;
  if (!mColumnDev) {
    // Lazily allocated scratch for the column matrix.
    // NOTE(review): sized from the FIRST call's batch size; confirm later
    // calls never use a larger bs, or this buffer is overrun.
    auto size = in_c * kernel_h_ * kernel_w_ * bs * out_h * out_w;
    gLogVerbose << "initialize enqueue mColumnDev count: " << size << std::endl;
    CUASSERT(cudaMalloc(&mColumnDev, size * mParamWordsize))
    CUASSERT(cudaMemset(mColumnDev, 0, size * mParamWordsize))
  }
  if (inputDesc[0].type == nvinfer1::DataType::kHALF) {
    deformable_im2col_kernel<<<blocks, threads, 0, stream>>>(
        num_kernels,
        (const half*)input,
        (const half*)offset,
        (const half*)mask,
        in_h,
        in_w,
        kernel_h_,
        kernel_w_,
        pad_h_,
        pad_w_,
        stride_h_,
        stride_w_,
        dilation_h_,
        dilation_w_,
        bs,
        in_c_,
        offset_groups_,
        out_h,
        out_w,
        use_mask_,
        (half*)mColumnDev);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
      printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
    }
    // Row-major product: output[out_c x n] = weight[out_c x k] * columns[k x n].
    int m = out_c;
    int n = bs * out_h * out_w;
    int k = in_c * kernel_h_ * kernel_w_;
    gemm(
        (half*)output, (half*)mColumnDev, (half*)mWeightDev.get(), n, m, k, n, k, n, CUBLAS_OP_N, CUBLAS_OP_N, mCublas);
    cudaError_t gemm_err = cudaGetLastError();
    if (gemm_err != cudaSuccess) {
      printf("error in cublasSgemm_v2: %s\n", cudaGetErrorString(gemm_err));
    }
    // BUG FIX: the bias was previously added to the scratch column buffer
    // (mColumnDev) and then discarded; add it to the GEMM result instead.
    // output [out_c, bs, out_h, out_w]
    add_bias_kernelLauncher((half*)output, (const half*)mBiasDev.get(), out_c, bs, out_h, out_w, stream);
    cudaError_t bias_err = cudaGetLastError();
    if (bias_err != cudaSuccess) {
      printf("error in add_bias_kernelLauncher: %s\n", cudaGetErrorString(bias_err));
    }
  } else {
    // float: identical pipeline at fp32.
    deformable_im2col_kernel<<<blocks, threads, 0, stream>>>(
        num_kernels,
        (const float*)input,
        (const float*)offset,
        (const float*)mask,
        in_h,
        in_w,
        kernel_h_,
        kernel_w_,
        pad_h_,
        pad_w_,
        stride_h_,
        stride_w_,
        dilation_h_,
        dilation_w_,
        bs,
        in_c_,
        offset_groups_,
        out_h,
        out_w,
        use_mask_,
        (float*)mColumnDev);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
      printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
    }
    int m = out_c;
    int n = bs * out_h * out_w;
    int k = in_c * kernel_h_ * kernel_w_;
    gemm(
        (float*)output,
        (float*)mColumnDev,
        (float*)mWeightDev.get(),
        n,
        m,
        k,
        n,
        k,
        n,
        CUBLAS_OP_N,
        CUBLAS_OP_N,
        mCublas);
    cudaError_t gemm_err = cudaGetLastError();
    if (gemm_err != cudaSuccess) {
      printf("error in cublasSgemm_v2: %s\n", cudaGetErrorString(gemm_err));
    }
    // BUG FIX: add the bias to the GEMM output, not to mColumnDev.
    // output [out_c, bs, out_h, out_w]
    add_bias_kernelLauncher((float*)output, (const float*)mBiasDev.get(), out_c, bs, out_h, out_w, stream);
    cudaError_t bias_err = cudaGetLastError();
    if (bias_err != cudaSuccess) {
      printf("error in add_bias_kernelLauncher: %s\n", cudaGetErrorString(bias_err));
    }
  }
  return 0;
}
} // namespace plugin
} // namespace nvinfer1
|
d320fe301ebf046280193af448cf8c2310d00ac9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Appends a BatchNorm layer (optionally fused with ReLU) to the model and
// returns its output tensor. The layer object is owned by `layers`.
Tensor FFModel::batch_norm(const Tensor& input,
                           bool relu)
{
  assert(input.numDim == 4); //Only support 4D BN for now
  BatchNorm *bn = new BatchNorm(*this, input, relu);
  layers.push_back(bn);
  return bn->outputs[0];
}
/*
locals[0] = scale
locals[1] = bias
*/
// Records the layer's shape metadata only: the output mirrors the input and
// the two weights (scale, bias) are 1-D with one entry per channel
// (_input.adim[2]). Actual regions/partitions are created later in
// create_weights / create_output_and_partition. Everything after the early
// `return;` sits inside #ifdef DEADCODE and is unreachable - kept verbatim
// as a reference for the old Legion region setup.
BatchNorm::BatchNorm(FFModel& model,
                     const Tensor& _input,
                     bool _relu)
: Op(model, OP_BATCHNORM, "BatchNorm", _input), relu(_relu), profiling(model.config.profiling)
{
  assert(_input.numDim == 4);
  numOutputs = 1;
  outputs[0] = _input;
  numWeights = 2;
  weights[0].numDim = 1;
  weights[0].adim[0] = _input.adim[2];
  weights[1].numDim = 1;
  weights[1].adim[0] = _input.adim[2];
  return;
#ifdef DEADCODE
  // Create output tensor
  int output_w = _input.adim[0];
  int output_h = _input.adim[1];
  int output_c = _input.adim[2];
  int output_n = _input.adim[3];
  FieldSpace fs = model.config.field_space;
  Rect<4> output_rect(Point<4>(0, 0, 0, 0),
      Point<4>(output_w-1, output_h-1, output_c-1, output_n-1));
  IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect);
  LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs);
  LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs);
  int extent_w = (output_w + num_par_w - 1) / num_par_w;
  int extent_h = (output_h + num_par_h - 1) / num_par_h;
  int extent_c = output_c / num_par_c;
  int extent_n = output_n / num_par_n;
  assert(output_c % num_par_c == 0);
  assert(output_n % num_par_n == 0);
  Rect<4> ext(Point<4>(0, 0, 0, 0),
      Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1));
  Transform<4, 4, coord_t> trans;
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++)
      trans[i][j] = 0;
  trans[0][0] = extent_w;
  trans[1][1] = extent_h;
  trans[2][2] = extent_c;
  trans[3][3] = extent_n;
  IndexPartition output_ip =
    runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext);
  assert(runtime->is_index_partition_disjoint(ctx, output_ip));
  assert(runtime->is_index_partition_complete(ctx, output_ip));
  LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip);
  LogicalPartition output_grad_lp =
    runtime->get_logical_partition(ctx, output_grad_lr, output_ip);
  int bias_nc = num_replica * _input.adim[2]; /*input_channels*/
  Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1);
  Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1);
  IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect);
  IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect);
  LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs);
  LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs);
  LogicalRegion bias_grad_lr =
    runtime->create_logical_region(ctx, bias_grad_is, fs);
  LogicalRegion scale_grad_lr =
    runtime->create_logical_region(ctx, bias_grad_is, fs);
  IndexPartition bias_grad_ip =
    runtime->create_equal_partition(ctx, bias_grad_is, task_is);
  LogicalPartition bias_grad_lp =
    runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip);
  LogicalPartition scale_grad_lp =
    runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip);
  Parameter scale_tensor, bias_tensor;
  scale_tensor.region = scale_lr;
  scale_tensor.region_grad = scale_grad_lr;
  scale_tensor.part = LogicalPartition::NO_PART;
  scale_tensor.part_grad = scale_grad_lp;
  weights[0] = scale_tensor;
  bias_tensor.region = bias_lr;
  bias_tensor.region_grad = bias_grad_lr;
  bias_tensor.part = LogicalPartition::NO_PART;
  bias_tensor.part_grad = bias_grad_lp;
  weights[1] = bias_tensor;
  numWeights = 2;
  outputs[0] = _input;
  outputs[0].region = output_lr;
  outputs[0].part = output_lp;
  outputs[0].region_grad = output_grad_lr;
  outputs[0].part_grad = output_grad_lp;
  printf("Create bn layer: output(%d %d %d %d)\n",
         outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]);
  input_lps[0] = _input.part;
#endif
}
void BatchNorm::create_weights(FFModel& model)
{
// Retrive the task indexspace for the op
std::string pcname = name;
task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname));
// Create scale and bias
Initializer* scale_initializer = new ConstantInitializer(1.0f);
Initializer* bias_initializer = new ConstantInitializer(0.0f);
const int dims[1] = {outputs[0].adim[2]};
weights[0] = model.create_conv_weight<1>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, scale_initializer);
weights[1] = model.create_conv_weight<1>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, bias_initializer);
}
void BatchNorm::create_output_and_partition(FFModel& model)
{
// Retrive the task indexspace for the op
std::string pcname = name;
task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Create output tensor
int output_w = outputs[0].adim[0];
int output_h = outputs[0].adim[1];
int output_c = outputs[0].adim[2];
int output_n = outputs[0].adim[3];
int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1;
int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1;
int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1;
{
const int dims[4] = {output_n, output_c, output_h, output_w};
outputs[0] = model.create_tensor<4>(dims, (IndexSpaceT<4>)task_is, DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
}
// Currently assume data parallelism for batch norm
assert(num_par_w == 1);
assert(num_par_h == 1);
assert(num_par_c == 1);
// Compute partition bound for input
Rect<4> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition(
inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]);
}
}
/*
regions[0]: input
regions[1]: output
regions[2](I): scale
regions[3](I): bias
*/
__host__
OpMeta* BatchNorm::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const BatchNorm* bm = (BatchNorm*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_scale(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
BatchNormMeta* m = new BatchNormMeta(handle);
m->relu = bm->relu;
m->mode = CUDNN_BATCHNORM_SPATIAL;
#if CUDNN_VERSION >= 7000
m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor));
assert(acc_input.rect == acc_output.rect);
int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1;
int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1;
int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1;
int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n, input_c,
input_h, input_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
output_n, output_c,
output_h, output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1, output_c, 1, 1));
//float *runningMean, *runningVar, *saveMean, *saveVar;
checkCUDA(hipMalloc(&m->runningMean, sizeof(float) * output_c));
checkCUDA(hipMalloc(&m->runningVar, sizeof(float) * output_c));
checkCUDA(hipMalloc(&m->saveMean, sizeof(float) * output_c));
checkCUDA(hipMalloc(&m->saveVar, sizeof(float) * output_c));
if (m->relu) {
checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_PROPAGATE_NAN, 0.0));
}
return m;
}
/*
regions[0](O): scale, initilized to ones
regions[1](O): bias, initilized to zeros
*/
__host__
void BatchNorm::init_para_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const BatchNorm* bm = (BatchNorm*) task->args;
const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA);
const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA);
Rect<1> rect_scale, rect_bias;
rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
float *scale_ptr = acc_scale.ptr(rect_scale.lo);
float *bias_ptr = acc_bias.ptr(rect_bias.lo);
// init kernel and bias
#ifdef PARAMETER_ALL_ONES
hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
scale_ptr, rect_scale.volume());
hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
bias_ptr, rect_bias.volume());
#else
//hipStream_t stream;
//checkCUDA(hipStreamCreate(&stream));
//hiprandGenerator_t genGPU;
//hiprandCreateGenerator(&genGPU, HIPRAND_RNG_PSEUDO_DEFAULT);
//hiprandSetStream(genGPU, stream);
//hiprandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL);
//hiprandGenerateUniform(genGPU, scale_ptr, rect_scale.volume());
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
scale_ptr, rect_scale.volume(), 1.0f);
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
bias_ptr, rect_bias.volume(), 0.0f);
//hiprandDestroyGenerator(genGPU);
#endif
}
__host__
void BatchNorm::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
ParallelConfig pc;
std::string pcname = name;
ff.config.find_parallel_config(4, pcname, pc);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
FFHandler handle = ff.handlers[pc.device_ids[idx++]];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(BATCHNORM_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/*
regions[0](I): input
regions[1](O): ouptut
regions[2](I): scale
regions[3](I): bias
*/
__host__
void BatchNorm::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
float alpha = 1.0f, beta = 0.0f;
const BatchNorm* bm = (BatchNorm*) task->args;
const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_scale(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
hipEvent_t t_start, t_end;
if (bm->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
coord_t numChannels = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningMean, numChannels, 0.0f);
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningVar, numChannels, 0.0f);
checkCUDNN(cudnnBatchNormalizationForwardTraining(
m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, acc_input.ptr,
m->outputTensor, acc_output.ptr, m->biasTensor, acc_scale.ptr, acc_bias.ptr,
1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON,
m->saveMean, m->saveVar));
if (bm->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("BatchNorm forward time (BF) = %.2fms\n", elapsed);
}
}
__host__
void BatchNorm::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I): input
regions[1](I/O): input_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): scale
regions[5](I/O): scale_grad
regions[6](I/O): bias_grad
*/
__host__
void BatchNorm::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 7);
assert(task->regions.size() == 7);
float alpha = 1.0f;
//float beta = 0.0f;
const BatchNorm* bm = (BatchNorm*) task->args;
const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_input_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 4> acc_output(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output_grad(
regions[3], task->regions[3], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 1> acc_scale(
regions[4], task->regions[4], FID_DATA, ctx, runtime);
TensorAccessorW<float, 1> acc_scale_grad(
regions[5], task->regions[5], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorW<float, 1> acc_bias_grad(
regions[6], task->regions[6], FID_DATA, ctx, runtime,
true/*readOutput*/);
hipEvent_t t_start, t_end;
if (bm->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
if (m->relu) {
int n = acc_output.rect.volume();
hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, 0, acc_output_grad.ptr, acc_output.ptr, n);
}
checkCUDNN(cudnnBatchNormalizationBackward(
m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha,
m->inputTensor, acc_input.ptr, m->outputTensor, acc_output_grad.ptr,
m->inputTensor, acc_input_grad.ptr, m->biasTensor, acc_scale.ptr,
acc_scale_grad.ptr, acc_bias_grad.ptr, CUDNN_BN_MIN_EPSILON,
m->saveMean, m->saveVar));
if (bm->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("BatchNorm backward time = %.2fms\n", elapsed);
}
}
__host__
void BatchNorm::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): input_grad (we only need grad tensors)
launcher.add_region_requirement(
RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
// regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
// regions[4](I): filter
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(4, FID_DATA);
// regions[5](I/O): filter_grad
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(5, FID_DATA);
// regions[6](I/O): bias_grad
launcher.add_region_requirement(
RegionRequirement(weights[1].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[1].region_grad));
launcher.add_field(6, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
}
bool BatchNorm::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
//TODO: implement measure_forward
return false;
}
| d320fe301ebf046280193af448cf8c2310d00ac9.cu | /* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::batch_norm(const Tensor& input,
bool relu)
{
assert(input.numDim == 4); //Only support 4D BN for now
BatchNorm *bn = new BatchNorm(*this, input, relu);
layers.push_back(bn);
return bn->outputs[0];
}
/*
locals[0] = scale
locals[1] = bias
*/
BatchNorm::BatchNorm(FFModel& model,
const Tensor& _input,
bool _relu)
: Op(model, OP_BATCHNORM, "BatchNorm", _input), relu(_relu), profiling(model.config.profiling)
{
assert(_input.numDim == 4);
numOutputs = 1;
outputs[0] = _input;
numWeights = 2;
weights[0].numDim = 1;
weights[0].adim[0] = _input.adim[2];
weights[1].numDim = 1;
weights[1].adim[0] = _input.adim[2];
return;
#ifdef DEADCODE
// Create output tensor
int output_w = _input.adim[0];
int output_h = _input.adim[1];
int output_c = _input.adim[2];
int output_n = _input.adim[3];
FieldSpace fs = model.config.field_space;
Rect<4> output_rect(Point<4>(0, 0, 0, 0),
Point<4>(output_w-1, output_h-1, output_c-1, output_n-1));
IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect);
LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs);
LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs);
int extent_w = (output_w + num_par_w - 1) / num_par_w;
int extent_h = (output_h + num_par_h - 1) / num_par_h;
int extent_c = output_c / num_par_c;
int extent_n = output_n / num_par_n;
assert(output_c % num_par_c == 0);
assert(output_n % num_par_n == 0);
Rect<4> ext(Point<4>(0, 0, 0, 0),
Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1));
Transform<4, 4, coord_t> trans;
for (int i = 0; i < 4; i++)
for (int j = 0; j < 4; j++)
trans[i][j] = 0;
trans[0][0] = extent_w;
trans[1][1] = extent_h;
trans[2][2] = extent_c;
trans[3][3] = extent_n;
IndexPartition output_ip =
runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext);
assert(runtime->is_index_partition_disjoint(ctx, output_ip));
assert(runtime->is_index_partition_complete(ctx, output_ip));
LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip);
LogicalPartition output_grad_lp =
runtime->get_logical_partition(ctx, output_grad_lr, output_ip);
int bias_nc = num_replica * _input.adim[2]; /*input_channels*/
Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1);
Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1);
IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect);
IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect);
LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs);
LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs);
LogicalRegion bias_grad_lr =
runtime->create_logical_region(ctx, bias_grad_is, fs);
LogicalRegion scale_grad_lr =
runtime->create_logical_region(ctx, bias_grad_is, fs);
IndexPartition bias_grad_ip =
runtime->create_equal_partition(ctx, bias_grad_is, task_is);
LogicalPartition bias_grad_lp =
runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip);
LogicalPartition scale_grad_lp =
runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip);
Parameter scale_tensor, bias_tensor;
scale_tensor.region = scale_lr;
scale_tensor.region_grad = scale_grad_lr;
scale_tensor.part = LogicalPartition::NO_PART;
scale_tensor.part_grad = scale_grad_lp;
weights[0] = scale_tensor;
bias_tensor.region = bias_lr;
bias_tensor.region_grad = bias_grad_lr;
bias_tensor.part = LogicalPartition::NO_PART;
bias_tensor.part_grad = bias_grad_lp;
weights[1] = bias_tensor;
numWeights = 2;
outputs[0] = _input;
outputs[0].region = output_lr;
outputs[0].part = output_lp;
outputs[0].region_grad = output_grad_lr;
outputs[0].part_grad = output_grad_lp;
printf("Create bn layer: output(%d %d %d %d)\n",
outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]);
input_lps[0] = _input.part;
#endif
}
void BatchNorm::create_weights(FFModel& model)
{
// Retrive the task indexspace for the op
std::string pcname = name;
task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname));
// Create scale and bias
Initializer* scale_initializer = new ConstantInitializer(1.0f);
Initializer* bias_initializer = new ConstantInitializer(0.0f);
const int dims[1] = {outputs[0].adim[2]};
weights[0] = model.create_conv_weight<1>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, scale_initializer);
weights[1] = model.create_conv_weight<1>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, bias_initializer);
}
void BatchNorm::create_output_and_partition(FFModel& model)
{
// Retrive the task indexspace for the op
std::string pcname = name;
task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Create output tensor
int output_w = outputs[0].adim[0];
int output_h = outputs[0].adim[1];
int output_c = outputs[0].adim[2];
int output_n = outputs[0].adim[3];
int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1;
int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1;
int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1;
{
const int dims[4] = {output_n, output_c, output_h, output_w};
outputs[0] = model.create_tensor<4>(dims, (IndexSpaceT<4>)task_is, DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
}
// Currently assume data parallelism for batch norm
assert(num_par_w == 1);
assert(num_par_h == 1);
assert(num_par_c == 1);
// Compute partition bound for input
Rect<4> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition(
inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]);
}
}
/*
regions[0]: input
regions[1]: output
regions[2](I): scale
regions[3](I): bias
*/
__host__
OpMeta* BatchNorm::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const BatchNorm* bm = (BatchNorm*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_scale(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
BatchNormMeta* m = new BatchNormMeta(handle);
m->relu = bm->relu;
m->mode = CUDNN_BATCHNORM_SPATIAL;
#if CUDNN_VERSION >= 7000
m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor));
assert(acc_input.rect == acc_output.rect);
int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1;
int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1;
int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1;
int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n, input_c,
input_h, input_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
output_n, output_c,
output_h, output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1, output_c, 1, 1));
//float *runningMean, *runningVar, *saveMean, *saveVar;
checkCUDA(cudaMalloc(&m->runningMean, sizeof(float) * output_c));
checkCUDA(cudaMalloc(&m->runningVar, sizeof(float) * output_c));
checkCUDA(cudaMalloc(&m->saveMean, sizeof(float) * output_c));
checkCUDA(cudaMalloc(&m->saveVar, sizeof(float) * output_c));
if (m->relu) {
checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_PROPAGATE_NAN, 0.0));
}
return m;
}
/*
regions[0](O): scale, initilized to ones
regions[1](O): bias, initilized to zeros
*/
__host__
void BatchNorm::init_para_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const BatchNorm* bm = (BatchNorm*) task->args;
const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA);
const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA);
Rect<1> rect_scale, rect_bias;
rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
float *scale_ptr = acc_scale.ptr(rect_scale.lo);
float *bias_ptr = acc_bias.ptr(rect_bias.lo);
// init kernel and bias
#ifdef PARAMETER_ALL_ONES
ones_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>(
scale_ptr, rect_scale.volume());
ones_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>(
bias_ptr, rect_bias.volume());
#else
//cudaStream_t stream;
//checkCUDA(cudaStreamCreate(&stream));
//curandGenerator_t genGPU;
//curandCreateGenerator(&genGPU, CURAND_RNG_PSEUDO_DEFAULT);
//curandSetStream(genGPU, stream);
//curandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL);
//curandGenerateUniform(genGPU, scale_ptr, rect_scale.volume());
assign_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>(
scale_ptr, rect_scale.volume(), 1.0f);
assign_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>(
bias_ptr, rect_bias.volume(), 0.0f);
//curandDestroyGenerator(genGPU);
#endif
}
__host__
void BatchNorm::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
ParallelConfig pc;
std::string pcname = name;
ff.config.find_parallel_config(4, pcname, pc);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
FFHandler handle = ff.handlers[pc.device_ids[idx++]];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(BATCHNORM_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/*
regions[0](I): input
regions[1](O): ouptut
regions[2](I): scale
regions[3](I): bias
*/
__host__
void BatchNorm::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
float alpha = 1.0f, beta = 0.0f;
const BatchNorm* bm = (BatchNorm*) task->args;
const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_scale(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
cudaEvent_t t_start, t_end;
if (bm->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
coord_t numChannels = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningMean, numChannels, 0.0f);
assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningVar, numChannels, 0.0f);
checkCUDNN(cudnnBatchNormalizationForwardTraining(
m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, acc_input.ptr,
m->outputTensor, acc_output.ptr, m->biasTensor, acc_scale.ptr, acc_bias.ptr,
1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON,
m->saveMean, m->saveVar));
if (bm->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("BatchNorm forward time (BF) = %.2fms\n", elapsed);
}
}
__host__
void BatchNorm::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I): input
regions[1](I/O): input_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): scale
regions[5](I/O): scale_grad
regions[6](I/O): bias_grad
*/
__host__
void BatchNorm::backward_task(const Task *task,
                              const std::vector<PhysicalRegion> &regions,
                              Context ctx, Runtime *runtime)
{
  // Per-point backward task; region order documented in the comment block
  // directly above this function.
  assert(regions.size() == 7);
  assert(task->regions.size() == 7);
  float alpha = 1.0f;
  //float beta = 0.0f;
  const BatchNorm* bm = (BatchNorm*) task->args;
  const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
  TensorAccessorR<float, 4> acc_input(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  TensorAccessorW<float, 4> acc_input_grad(
      regions[1], task->regions[1], FID_DATA, ctx, runtime,
      true/*readOutput*/);
  TensorAccessorR<float, 4> acc_output(
      regions[2], task->regions[2], FID_DATA, ctx, runtime);
  TensorAccessorW<float, 4> acc_output_grad(
      regions[3], task->regions[3], FID_DATA, ctx, runtime,
      true/*readOutput*/);
  TensorAccessorR<float, 1> acc_scale(
      regions[4], task->regions[4], FID_DATA, ctx, runtime);
  TensorAccessorW<float, 1> acc_scale_grad(
      regions[5], task->regions[5], FID_DATA, ctx, runtime,
      true/*readOutput*/);
  TensorAccessorW<float, 1> acc_bias_grad(
      regions[6], task->regions[6], FID_DATA, ctx, runtime,
      true/*readOutput*/);
  cudaEvent_t t_start, t_end;
  if (bm->profiling) {
    cudaEventCreate(&t_start);
    cudaEventCreate(&t_end);
    cudaEventRecord(t_start);
  }
#ifndef DISABLE_LEGION_CUDA_HIJACK
  cudaStream_t stream;
  checkCUDA(cudaStreamCreate(&stream));
  checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
  // If forward fused a ReLU, fold its gradient into output_grad first.
  if (m->relu) {
    int n = acc_output.rect.volume();
    reluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(acc_output_grad.ptr, acc_output.ptr, n);
  }
  checkCUDNN(cudnnBatchNormalizationBackward(
      m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha,
      m->inputTensor, acc_input.ptr, m->outputTensor, acc_output_grad.ptr,
      m->inputTensor, acc_input_grad.ptr, m->biasTensor, acc_scale.ptr,
      acc_scale_grad.ptr, acc_bias_grad.ptr, CUDNN_BN_MIN_EPSILON,
      m->saveMean, m->saveVar));
#ifndef DISABLE_LEGION_CUDA_HIJACK
  // Fix: the stream was created on every task invocation but never released,
  // leaking one CUDA stream per backward call. Destroying it here is safe:
  // cudaStreamDestroy defers resource release until the enqueued work
  // (cuDNN backward above) has completed.
  checkCUDA(cudaStreamDestroy(stream));
#endif
  if (bm->profiling) {
    cudaEventRecord(t_end);
    checkCUDA(cudaEventSynchronize(t_end));
    float elapsed = 0;
    checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
    cudaEventDestroy(t_start);
    cudaEventDestroy(t_end);
    printf("BatchNorm backward time = %.2fms\n", elapsed);
  }
}
__host__
void BatchNorm::backward(const FFModel& ff)
{
  // Index-launch the BatchNorm backward task; each task point receives its
  // per-device OpMeta* through the argument map.
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
  int idx = 0;
  for (PointInRectIterator<4> it(rect); it(); it++) {
    OpMeta* mp = meta[idx++];
    argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
  }
  IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is,
                         TaskArgument(this, sizeof(BatchNorm)), argmap,
                         Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
                         FFConfig::get_hash_id(std::string(name)));
  // Region indices 0-6 must line up with backward_task's accessors.
  // regions[0](I): input
  launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  launcher.add_field(0, FID_DATA);
  // regions[1](I/O): input_grad (we only need grad tensors)
  launcher.add_region_requirement(
      RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
  launcher.add_field(1, FID_DATA);
  // regions[2](I): output
  launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, outputs[0].region));
  launcher.add_field(2, FID_DATA);
  // regions[3](I/O): output_grad
  launcher.add_region_requirement(
      RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
  launcher.add_field(3, FID_DATA);
  // regions[4](I): filter
  launcher.add_region_requirement(
      RegionRequirement(weights[0].region, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, weights[0].region));
  launcher.add_field(4, FID_DATA);
  // regions[5](I/O): filter_grad
  launcher.add_region_requirement(
      RegionRequirement(weights[0].part_grad, 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, weights[0].region_grad));
  launcher.add_field(5, FID_DATA);
  // regions[6](I/O): bias_grad
  launcher.add_region_requirement(
      RegionRequirement(weights[1].part_grad, 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, weights[1].region_grad));
  launcher.add_field(6, FID_DATA);
  // NOTE(review): the returned FutureMap is captured but never waited on —
  // presumably intentional (fire-and-forget); confirm.
  FutureMap fm = runtime->execute_index_space(ctx, launcher);
}
// Stub: simulator-based cost estimation is not implemented for BatchNorm.
// Returns false (measurement unavailable) and leaves forward_time /
// backward_time untouched; sim and pc are unused.
bool BatchNorm::measure_compute_time(Simulator* sim,
                                     const ParallelConfig& pc,
                                     float& forward_time,
                                     float& backward_time)
{
  //TODO: implement measure_forward
  return false;
}
|
70c8bcd2731fe498c0c416decac7aae1434fde49.hip | // !!! This is a file automatically generated by hipify!!!
/*
* LargeBoidsSimulator
*
* Yhoichi Mototake
*/
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <hip/hip_vector_types.h>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include "calc.h"
#include "param.h"
/////////////
/*Variables*/
/////////////////////////////////////
float3 *speed = NULL;
float3 *tmp_speed = NULL;
float4 *tmp_position = NULL;
float3 *original_speed = NULL;
float4 *original_position = NULL;
float4 *tmp_position_for_fill = NULL;
float4 *dptr = NULL;
int* coord = NULL;
int* coord_particle = NULL;
DataStruct* data_for_calc;
Paramstruct* param_host;
Paramstruct* param_device;
hipStream_t stream0;
hipStream_t stream1;
/////////////////////////////////////
//////////////////////////////////////////
/*Definitions of inline device functions*/
/////////////////////////////////////////////////////////////////////////////////////
// Element-wise arithmetic helpers for the builtin float3/float4/double3/double4
// vector types. The mixed-precision overloads (e.g. float4 - float4 -> double4,
// double3 + float4) exist so calc_core can accumulate neighbour sums in double
// precision while the per-particle state stays in float.
inline __host__ __device__ float4 operator+(const float4 &a, const float4 &b)
{
	return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
inline __host__ __device__ float3 operator+(const float3 &a, const float3 &b)
{
	return make_float3(a.x + b.x, a.y + b.y,a.z + b.z);
}
inline __host__ __device__ double3 operator+(const double3 &a, const float4 &b)
{
	return make_double3(a.x + (double)b.x, a.y + (double)b.y,a.z + (double)b.z);
}
inline __host__ __device__ double3 operator+(const double3 &a, const double4 &b)
{
	return make_double3(a.x + b.x, a.y + b.y,a.z + b.z);
}
inline __host__ __device__ double3 operator+(const double3 &a, const double3 &b)
{
	return make_double3(a.x + b.x, a.y + b.y,a.z + b.z);
}
// NOTE: widens to double4 — used by the separation-force accumulation.
inline __host__ __device__ double4 operator-(const float4 &a, const float4 &b)
{
	return make_double4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
inline __host__ __device__ float3 operator-(const float3 &a, const float3 &b)
{
	return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
inline __host__ __device__ double4 operator+(const double4 &a, const double4 &b)
{
	return make_double4(a.x + b.x, a.y + b.y,a.z + b.z,a.w + b.w);
}
// Scalar broadcast subtraction/addition on every component (including w).
inline __host__ __device__ float4 operator-(const float4 &a, const float &b)
{
	return make_float4(a.x - b, a.y - b, a.z - b, a.w - b);
}
inline __host__ __device__ float4 operator+(const float4 &a, const float &b)
{
	return make_float4(a.x + b, a.y + b, a.z + b, a.w + b);
}
inline __host__ __device__ double4 operator/(const double4 &a, const double &b)
{
	return make_double4(a.x / b, a.y / b, a.z / b, a.w / b);
}
inline __host__ __device__ float3 operator/(const float3 &a, const float &b)
{
	return make_float3(a.x / b, a.y / b, a.z / b);
}
inline __host__ __device__ double3 operator/(const double3 &a, const int &b)
{
	return make_double3(a.x / b, a.y / b, a.z / b);
}
// Scalar * vector scaling.
inline __host__ __device__ float4 operator*(const float &a, const float4 &b)
{
	return make_float4(a * b.x, a * b.y, a * b.z,a * b.w);
}
inline __host__ __device__ float3 operator*(const float &a, const float3 &b)
{
	return make_float3(a * b.x, a * b.y, a * b.z);
}
inline __host__ __device__ double4 operator*(const double &a, const double4 &b)
{
	return make_double4(a * b.x, a * b.y, a * b.z,a * b.w);
}
// Inner products (float4 version includes the w component).
inline __host__ __device__ float dot(float4 a, float4 b)
{
	return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;
}
inline __host__ __device__ float dot(float3 a, float3 b)
{
	return a.x*b.x + a.y*b.y + a.z*b.z;
}
// Narrowing conversions between the builtin vector types.
// cast_float3 drops the w component (when present) and narrows to float;
// cast_float4 widens a float3 and pads w with zero.
inline __host__ __device__ float3 cast_float3(double4 b)
{
	return make_float3((float)b.x, (float)b.y, (float)b.z);
}
inline __host__ __device__ float3 cast_float3(double3 b)
{
	return make_float3((float)b.x, (float)b.y, (float)b.z);
}
inline __host__ __device__ float3 cast_float3(float4 b)
{
	return make_float3(b.x, b.y, b.z);
}
inline __host__ __device__ float4 cast_float4(float3 b)
{
	return make_float4(b.x, b.y, b.z, 0.0f);
}
/////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////
/*Definitions of global device functions*/
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//main calculation function
// One Boids time step: one thread per particle ("index" = particle id).
// Reads the previous state (original_position / original_speed), scans the
// uniform lattice for neighbours (coord = per-cell list head, coord_particle
// = per-particle "next" link, -1 terminates a list), applies separation /
// alignment / cohesion (+ optional noise), clamps the speed, integrates,
// wraps or reflects at the walls, and writes the new state into
// tmp_position / tmp_speed. tmp_position_for_fill keeps the OLD position so
// fill_space can clear the stale lattice cell on the next pass.
// Launch: 2D grid of width*height threads, launched from launch_calc.
__global__ void calc_core(Paramstruct* param,float4 *original_position ,float3 *original_speed , float4 *tmp_position,float4 *tmp_position_for_fill, unsigned int width, unsigned int height,float3 *tmp_speed, int *coord,int* coord_particle)
{
	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	const unsigned int index = y*width+x;

	// Cache the parameter block in shared memory.
	// Fix: previously EVERY thread wrote the shared struct with no barrier,
	// a shared-memory data race; now one thread writes it and the whole
	// block synchronizes before anyone reads.
	__shared__ Paramstruct get_param;
	if (threadIdx.x == 0 && threadIdx.y == 0) {
		get_param = *param;
	}
	__syncthreads();
	// Bounds guard (after the barrier so all threads reach it).
	if (x >= width || y >= height) return;

	float4 position = original_position[index];
	float3 velocity = original_speed[index];

	int neighboursCount = 0;       // cohesion neighbours
	int neighboursCount_ave = 0;   // alignment neighbours
	double3 neighboursAvgSpeed = make_double3(0, 0, 0);
	double4 neighboursAvgPosition = make_double4(0, 0, 0, 0);
	double3 separationForce = make_double3(0, 0, 0);
	float4 p;
	double d;

	// Lattice cells per axis, and how many cells the search radius spans.
	int Nd = 2*get_param.field_size*(int)(1/get_param.mesh_size);
	int max_mesh_index = (int)(get_param.max_distance/get_param.mesh_size+0.999);

	for(int count_x=-max_mesh_index;count_x<=max_mesh_index;count_x++){
		for(int count_y=-max_mesh_index;count_y<=max_mesh_index;count_y++){
			for(int count_z=-max_mesh_index;count_z<=max_mesh_index;count_z++){
				int z_index = (int)((1/get_param.mesh_size)*(position.z+get_param.field_size)) + count_z;
				int y_index = (int)((1/get_param.mesh_size)*(position.y+get_param.field_size)) + count_y;
				int x_index = (int)((1/get_param.mesh_size)*(position.x+get_param.field_size)) + count_x;
				// Wrap the cell index at the lattice boundary; remember which
				// face was crossed so neighbour positions can be shifted by one
				// period below (flags: -1 = wrapped low, +1 = wrapped high).
				int x_flag = 0;
				int y_flag = 0;
				int z_flag = 0;
				if (z_index < 0){
					z_index = Nd + z_index;
					z_flag = -1;
				}
				if(z_index >= Nd){
					z_index = z_index - Nd;
					z_flag = 1;
				}
				if(y_index < 0){
					y_index = Nd + y_index;
					y_flag = -1;
				}
				if(y_index >= Nd){
					y_index = y_index - Nd;
					y_flag = 1;
				}
				if(x_index < 0){
					x_index = Nd + x_index;
					x_flag = -1;
				}
				if(x_index >= Nd){
					x_index = x_index - Nd;
					x_flag = 1;
				}
				int coord_index = (z_index*Nd*Nd + y_index*Nd + x_index);
				// Walk the singly linked particle list of this cell.
				int tmp_index = coord[coord_index];
				while (tmp_index != -1) {
					p = original_position[tmp_index];
#if PERIODIC == 1
					// Shift the neighbour by one period when the cell wrapped.
					if (z_flag == 1){
						p.z = p.z + 2*FIELD_SIZE;
					}
					if(z_flag == -1){
						p.z = p.z - 2*FIELD_SIZE;
					}
					if(y_flag == 1){
						p.y = p.y + 2*FIELD_SIZE;
					}
					if(y_flag == -1){
						p.y = p.y - 2*FIELD_SIZE;
					}
					if(x_flag == 1){
						p.x = p.x + 2*FIELD_SIZE;
					}
					if(x_flag == -1){
						p.x = p.x - 2*FIELD_SIZE;
					}
#endif
					// k2 = cosine of the angle between our heading and the offset
					// to p. When tmp_index == index the offset is zero, k2 is NaN
					// and every comparison below fails, so a particle skips itself.
					float k2 = ((p.x-position.x)*velocity.x + (p.y-position.y)*velocity.y + (p.z-position.z)*velocity.z)/sqrt((p.x-position.x)*(p.x-position.x)+(p.y-position.y)*(p.y-position.y)+(p.z-position.z)*(p.z-position.z))/sqrt(velocity.x*velocity.x+velocity.y*velocity.y+velocity.z*velocity.z);
					if(k2 > get_param.max_angle){
						// Cheap per-axis rejection before the full distance.
						float dx = fabsf(position.x - p.x);
						if (dx < get_param.max_distance){
							float dy = fabsf(position.y - p.y);
							if (dy < get_param.max_distance){
								float dz = fabsf(position.z - p.z);
								if (dz < get_param.max_distance){
									d = sqrt(dx*dx + dy*dy + dz*dz);
									if (d < get_param.max_distance){
										// alignment: accumulate neighbour velocities
										if(d < get_param.sight_distance_alignment && k2 > get_param.sight_angle_alignment){
											neighboursCount_ave++;
											neighboursAvgSpeed = neighboursAvgSpeed + make_double3(original_speed[tmp_index].x,original_speed[tmp_index].y,original_speed[tmp_index].z);
										}
										// cohesion: accumulate neighbour positions
										if (d < get_param.sight_distance_cohesion && k2 > get_param.sight_angle_cohesion){
											if(d>0){
												neighboursCount++;
												neighboursAvgPosition = neighboursAvgPosition + make_double4(p.x,p.y,p.z,p.w);
											}else{
												printf("position of two particles are completely matched, there are probability of bug existing.\n");
											}
										}
										// separation: push away from very close neighbours
										if (d < get_param.min_distance && k2 > get_param.sight_angle_separation) {
											if(d>0){
												separationForce = separationForce + ((position - p))/d;
											}else{
												printf("position of two particles are completely matched, there are probability of bug existing.\n");
											}
										}
									}
								}
							}
						}
					}
					// Advance to the next particle in this cell.
					tmp_index = coord_particle[tmp_index];
				}
			}
		}
	}

	// Apply the three steering forces (EPS scales the integration step).
	float tmp_coeff = EPS*get_param.min_distance*get_param.w_min_distance;
	velocity = velocity + tmp_coeff*(cast_float3(separationForce));
	if(neighboursCount_ave > 0){
		neighboursAvgSpeed = neighboursAvgSpeed / neighboursCount_ave;
		velocity = velocity + EPS*get_param.w_neighbour_speed*(cast_float3(neighboursAvgSpeed) - velocity);
	}
	if(neighboursCount > 0){
		neighboursAvgPosition = neighboursAvgPosition / neighboursCount;
		float3 coh_v;
		coh_v = cast_float3(neighboursAvgPosition) - cast_float3(position);
		velocity = velocity + EPS*get_param.w_neighbour_distance*coh_v;
	}
#if NOISE == 1
	// NOTE(review): fixed seed/offset means the SAME noise vector is drawn
	// for a given particle on every timestep — confirm this is intended.
	hiprandState_t s;
	hiprand_init(10, index, 0, &s);
	float3 noise_rand = make_float3(2*(hiprand_uniform(&s)-0.5),2*(hiprand_uniform(&s)-0.5),2*(hiprand_uniform(&s)-0.5));
	velocity = velocity + EPS*get_param.w_noise* get_param.max_speed * noise_rand;
#endif
	// Clamp |v| into [min_speed, max_speed].
	// NOTE(review): if check_speed is ever exactly 0 the rescale divides by
	// zero — confirm the speed cannot vanish.
	float check_speed = sqrt(velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z);
	if(check_speed > get_param.max_speed){
		float norm = 1/(get_param.max_speed)*check_speed;
		velocity = velocity / norm;
	}else if(check_speed < get_param.min_speed){
		float norm = 1/(get_param.min_speed)*check_speed;
		velocity = velocity / norm;
	}
	// Integrate position, then wrap (periodic) or reflect (walls).
	position = position + EPS * cast_float4(velocity);
	float scale = (float)get_param.field_size;
#if PERIODIC == 1
	if (position.x >= scale) {
		position.x = -2*scale + position.x;
	}else if (position.x < -scale) {
		position.x = 2*scale + position.x;
	}
	if (position.y >= scale) {
		position.y = -2*scale + position.y;
	}else if (position.y < -scale) {
		position.y = 2*scale + position.y;
	}
	if (position.z >= scale) {
		position.z = -2*scale + position.z;
	}else if (position.z < -scale) {
		position.z = 2*scale + position.z;
	}
#else
	if (position.x >= scale) {
		velocity.x = -velocity.x;
		position.x = 2*scale - position.x;
	}else if (position.x < -scale) {
		velocity.x = -velocity.x;
		position.x = -2*scale - position.x;
	}
	if (position.y >= scale) {
		velocity.y = -velocity.y;
		position.y = 2*scale - position.y;
	}else if (position.y < -scale) {
		velocity.y = -velocity.y;
		position.y = -2*scale - position.y;
	}
	if (position.z >= scale) {
		velocity.z = -velocity.z;
		position.z = 2*scale - position.z;
	}else if (position.z < -scale) {
		velocity.z = -velocity.z;
		position.z = -2*scale - position.z;
	}
#endif
	// Publish: old position (for lattice cleanup), new position, new velocity.
	tmp_position_for_fill[index] = original_position[index];
	tmp_position[index] = position;
	tmp_speed[index] = velocity;
}
//reload original variables
// Copy the freshly computed state (tmp_*) back into the "original" buffers so
// the next step reads it. One thread per particle; 2D grid of width*height.
__global__ void reload(float4 *original_position ,float3 *original_speed , float4 *tmp_position, unsigned int width, unsigned int height,float3 *tmp_speed)
{
	unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	// Fix: guard against grids that do not divide width/height evenly.
	if (x >= width || y >= height) return;
	unsigned int index = y*width+x;
	original_position[index] = tmp_position[index];
	original_speed[index] = tmp_speed[index];
}
//sent data for
// Copy the current state into the host-mapped display buffers (pos/speed are
// zero-copy device pointers resolved in launch_calc). One thread per particle.
__global__ void sent_data(float4 *pos,float4 *tmp_position, unsigned int width, unsigned int height,float3 *tmp_speed,float3 *speed)
{
	unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	// Fix: guard against grids that do not divide width/height evenly.
	if (x >= width || y >= height) return;
	unsigned int index = y*width+x;
	pos[index] = tmp_position[index];
	speed[index] = tmp_speed[index];
}
//set initial value of position and speed
// Initialise one particle with uniform random values: position in
// ±INIT_BOIDS_SIZE per axis (w = 1), velocity in ±0.05 per axis.
// The param argument is unused; kept for signature compatibility.
__global__ void prepare_calc(Paramstruct* param,float4 *original_position, unsigned int width, unsigned int height, float3 *original_speed)
{
	unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	// Fix: guard against grids that do not divide width/height evenly.
	if (x >= width || y >= height) return;
	unsigned int index = y*width+x;
	// (Removed: a dead u/v coordinate computation whose results were never
	// used, and a __syncthreads() that synchronized nothing.)
	hiprandState_t s;
	hiprand_init(width, index, 0, &s);
	original_position[index] = make_float4(INIT_BOIDS_SIZE*(2*(hiprand_uniform(&s)-0.5)), INIT_BOIDS_SIZE*(2*(hiprand_uniform(&s)-0.5)), INIT_BOIDS_SIZE*(2*(hiprand_uniform(&s)-0.5)),1.0f);
	original_speed[index] = make_float3(0.05*2*(hiprand_uniform(&s)-0.5),0.05*2*(hiprand_uniform(&s)-0.5),0.05*2*(hiprand_uniform(&s)-0.5));
}
//fill -1 in lattice array
// Clear this particle's OLD lattice cell (set its list head to -1), using the
// position saved in tmp_position_for_fill before the last update.
// coord_particle is unused; kept for signature compatibility.
__global__ void fill_space(float4 *tmp_position_for_fill, unsigned int width, unsigned int height, int *coord,int* coord_particle)
{
	unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	// Fix: guard against grids that do not divide width/height evenly.
	if (x >= width || y >= height) return;
	unsigned int index = y*width+x;
	int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE);
	// Flattened (z, y, x) cell index of the previous position.
	float4 q = tmp_position_for_fill[index];
	int coord_index_pre = ((int)((1/MESH_SIZE)*(q.z+FIELD_SIZE)))*Nd*Nd + ((int)((1/MESH_SIZE)*(q.y+FIELD_SIZE)))*Nd + ((int)((1/MESH_SIZE)*(q.x+FIELD_SIZE)));
	coord[coord_index_pre] = -1;
}
//regist particle id for lattice array
// Register every particle in its lattice cell by pushing it onto the cell's
// lock-free singly linked list: coord[cell] holds the head particle id,
// coord_particle[i] holds particle i's "next" link (-1 terminates).
// Fix: the original published the new head with atomicCAS BEFORE writing the
// node's next-pointer, so a concurrent reader following the head could see a
// stale coord_particle entry. We now link first, fence, then publish.
__global__ void divide_space(float4 *original_position, unsigned int width, unsigned int height, int *coord,int* coord_particle)
{
	unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (x >= width || y >= height) return;   // bounds guard
	unsigned int index = y*width+x;
	int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE);
	float4 pos = original_position[index];
	int coord_index = ((int)((1/MESH_SIZE)*(pos.z+FIELD_SIZE)))*Nd*Nd + ((int)((1/MESH_SIZE)*(pos.y+FIELD_SIZE)))*Nd + ((int)((1/MESH_SIZE)*(pos.x+FIELD_SIZE)));
	// Standard lock-free head insert: set our next pointer to the assumed
	// head (-1 when the cell is empty), make that write visible, then try to
	// swing the head to ourselves; retry with the observed head on failure.
	int assumed = coord[coord_index];
	int old;
	do {
		coord_particle[index] = assumed;
		__threadfence();                 // link must be visible before publish
		old = assumed;
		assumed = atomicCAS(&coord[coord_index], old, index);
	} while (assumed != old);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// One-time initialisation: randomise both state buffers, clear the whole
// lattice to -1 (empty), then register every particle in its cell.
// pos and speed arguments are unused here. All work runs on stream0.
void prepare(float4 *pos, unsigned int mesh_width,
		unsigned int mesh_height, float3 *speed)
{
	// execute the kernel
	int block_y;
	if(mesh_height >= 32){
		block_y = 32;
	}else{
		block_y = 1;
	}
	dim3 block(32, block_y, 1);
	dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
	hipLaunchKernelGGL(( prepare_calc), dim3(grid), dim3(block),0,stream0, param_device,tmp_position, mesh_width, mesh_height, tmp_speed);
	hipLaunchKernelGGL(( prepare_calc), dim3(grid), dim3(block),0,stream0, param_device,original_position, mesh_width, mesh_height, original_speed);
	//zero fill
	// NOTE(review): Nd here includes +1 while the kernels compute Nd without
	// it, so the fill covers a slightly larger cube than is ever indexed —
	// confirm the +1 is only head-room for particles exactly on the boundary.
	int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE)+1;
	hipStreamSynchronize(stream0);
	thrust::device_ptr<int> coord_thrust(coord);
	thrust::fill(coord_thrust,coord_thrust+Nd*Nd*Nd,-1);
	hipLaunchKernelGGL(( divide_space), dim3(grid), dim3(block),0,stream0, original_position, mesh_width, mesh_height, coord,coord_particle);
}
// Populate the host-side parameter block from the compile-time constants in
// param.h and push a copy to the device (param_device).
void setparam(){
	// Fix: the block was malloc'd on every call but never freed, and the
	// SEARCH==1 path calls setparam() in a loop — allocate exactly once.
	if (param_host == NULL) {
		param_host = (Paramstruct*)malloc(sizeof(Paramstruct));
	}
	// The neighbour search radius is the larger of the two sight radii.
	param_host->max_distance =
		(SIGHT_DISTANCE_COHESION >= SIGHT_DISTANCE_ALIGNMENT)
			? SIGHT_DISTANCE_COHESION
			: SIGHT_DISTANCE_ALIGNMENT;
	param_host->sight_distance_cohesion = SIGHT_DISTANCE_COHESION;
	param_host->sight_distance_alignment = SIGHT_DISTANCE_ALIGNMENT;
	param_host->min_distance = MIN_DISTANCE;
	// The early-rejection angle gate (calc_core tests k2 > max_angle) is the
	// smallest, i.e. loosest, of the three cosine thresholds.
	param_host->max_angle =
		(SIGHT_ANGLE_COHESION < SIGHT_ANGLE_ALIGNMENT && SIGHT_ANGLE_COHESION < SIGHT_ANGLE_SEPARATION)
			? SIGHT_ANGLE_COHESION
			: ((SIGHT_ANGLE_ALIGNMENT < SIGHT_ANGLE_SEPARATION)
				? SIGHT_ANGLE_ALIGNMENT
				: SIGHT_ANGLE_SEPARATION);
	param_host->sight_angle_separation = SIGHT_ANGLE_SEPARATION;
	param_host->sight_angle_alignment = SIGHT_ANGLE_ALIGNMENT;
	param_host->sight_angle_cohesion = SIGHT_ANGLE_COHESION;
	// Force weights, speed limits and geometry.
	param_host->w_neighbour_speed = W_NEIGHBOUR_SPEED;
	param_host->w_neighbour_distance = W_NEIGHBOUR_DISTANCE;
	param_host->w_min_distance = W_MIN_DISTANCE;
	param_host->w_noise = W_NOISE;
	param_host->min_speed = MIN_SPEED;
	param_host->max_speed = MAX_SPEED;
	param_host->field_size = FIELD_SIZE;
	param_host->mesh_size = MESH_SIZE;
	checkCudaErrors(hipMemcpy(param_device, param_host, sizeof(Paramstruct), hipMemcpyHostToDevice));
}
// One full simulation step on stream0:
//   1) calc_core computes the new state from the old one,
//   2) reload makes the new state current,
//   3) fill_space / divide_space rebuild the neighbour lattice,
//   4) sent_data copies the state into the host-mapped display buffers
//      (on stream1; pos/speed arguments are replaced locally by the
//      zero-copy device pointers of data_for_calc->a / ->b).
void launch_calc(float4 *pos, unsigned int mesh_width,
		unsigned int mesh_height, float3 *speed)
{
	int block_y;
	if(mesh_height >= 32){
		block_y = 32;
	}else{
		block_y = 1;
	}
	dim3 block(32, block_y, 1);
	dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
	hipStreamSynchronize(stream0);
	hipLaunchKernelGGL(( calc_core), dim3(grid), dim3(block),0,stream0, param_device, original_position ,original_speed ,tmp_position,tmp_position_for_fill, mesh_width, mesh_height ,tmp_speed, coord,coord_particle);
	hipStreamSynchronize(stream0);
	hipLaunchKernelGGL(( reload), dim3(grid), dim3(block),0,stream0, original_position ,original_speed , tmp_position, mesh_width, mesh_height,tmp_speed);
#if DEBUG == 1
	//zero fill
	// Fix: Nd was undefined in this scope (compile error when DEBUG==1) and
	// the fill covered only Nd*Nd of the Nd^3 lattice cells.
	int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE)+1;
	thrust::device_ptr<int> coord_thrust(coord);
	thrust::fill(coord_thrust,coord_thrust+Nd*Nd*Nd,-1);
#endif
	hipLaunchKernelGGL(( fill_space), dim3(grid), dim3(block),0,stream0, tmp_position_for_fill, mesh_width, mesh_height, coord,coord_particle);
	hipStreamSynchronize(stream0);
	hipLaunchKernelGGL(( divide_space), dim3(grid), dim3(block),0,stream0, original_position, mesh_width, mesh_height, coord,coord_particle);
	hipStreamSynchronize(stream0);
	hipHostGetDevicePointer( &pos, data_for_calc->a, 0 );
	hipHostGetDevicePointer( &speed, data_for_calc->b, 0 );
	hipLaunchKernelGGL(( sent_data), dim3(grid), dim3(block),0,stream1, pos,original_position, mesh_width, mesh_height ,tmp_speed,speed);
}
// Allocate all device-side simulation buffers and the two streams.
// argc/argv/ref_file are currently unused; kept for interface stability.
// Returns true on success (allocation failures abort inside checkCudaErrors).
bool malloc_val(int argc, char **argv, char *ref_file)
{
	// Lattice cells per axis (+1 head-room cell per axis).
	int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE)+1;
	checkCudaErrors(hipMalloc((void **)&param_device, sizeof(Paramstruct)));
	checkCudaErrors(hipMalloc((void **)&original_speed, mesh_width*mesh_height*sizeof(float3)));
	checkCudaErrors(hipMalloc((void **)&original_position, mesh_width*mesh_height*sizeof(float4)));
	checkCudaErrors(hipMalloc((void **)&tmp_speed, mesh_width*mesh_height*sizeof(float3)));
	checkCudaErrors(hipMalloc((void **)&tmp_position, mesh_width*mesh_height*sizeof(float4)));
	checkCudaErrors(hipMalloc((void **)&tmp_position_for_fill, mesh_width*mesh_height*sizeof(float4)));
	checkCudaErrors(hipMalloc((void **)&coord_particle, mesh_width*mesh_height*sizeof(int)));
	checkCudaErrors(hipMalloc((void **)&coord, Nd*Nd*Nd*sizeof(int)));
	// Fix: stream-creation return codes were stored in a local and ignored;
	// check them like every other runtime call.
	checkCudaErrors(hipStreamCreate(&stream1));
	checkCudaErrors(hipStreamCreate(&stream0));
	return true;
}
// Advance the simulation by one step. The display-buffer pointer is resolved
// inside launch_calc itself, so only a null placeholder is passed here.
void run()
{
	float4 *mappedPos = NULL;
	launch_calc(mappedPos, mesh_width, mesh_height, speed);
}
// Thin wrapper: perform the one-time state and lattice initialisation.
void preparefunc()
{
	prepare(dptr, mesh_width, mesh_height, speed);
}
// Worker-thread entry point: allocates device state, initialises the flock,
// then either runs the simulation forever (SEARCH == 0) or sweeps an 11x11
// grid of sight-radius perturbations (SEARCH == 1). In both modes it
// periodically dumps positions/velocities to CSV snapshot files.
void* routine( void *pvoidData ){
	printf("calc start\n");
	data_for_calc = (DataStruct*)pvoidData;
	char *ref_file = NULL;
	int *argc = data_for_calc->pArgc;
	char **argv = data_for_calc->pArgv;
	malloc_val(argc[0], argv, ref_file);
	setparam();
	preparefunc();
	// Fix: save_fp was uninitialized; if a logging window was entered after
	// its opening step (time%period != 0 on first hit), fprintf() wrote
	// through a garbage pointer. It now starts NULL and all writes/closes
	// are guarded.
	FILE* save_fp = NULL;
#if SEARCH == 0
	int n = 0; //dummy variable to erase the "unreachable" warning
	while(n != EOF){   // intentional infinite loop
		run();
		// Log 101 consecutive steps out of every 1000.
		if(data_for_calc->time%1000<=100){
			if(data_for_calc->time%1000==0){
				char file_name[100] = "param0_";
				sprintf(file_name,"param%d_%ld.csv",PARAM_NUM,data_for_calc->time);
				save_fp = fopen(file_name,"w");
			}
			if(save_fp != NULL){
				fprintf(save_fp,"%ld",data_for_calc->time);
				for(int count=0;count<N;count++){
					fprintf(save_fp,",%f,%f,%f",data_for_calc->a[count].x,data_for_calc->a[count].y,data_for_calc->a[count].z);
					fprintf(save_fp,",%f,%f,%f",data_for_calc->b[count].x,data_for_calc->b[count].y,data_for_calc->b[count].z);
				}
				fprintf(save_fp,"\n");
				if(data_for_calc->time%1000==100){
					fclose(save_fp);
					save_fp = NULL;
				}
			}
		}
		data_for_calc->time += 1;
	}
#elif SEARCH == 1
	for(int count_param1=-5;count_param1<6;count_param1++){
		printf("count_param1=%d\n",count_param1);
		for(int count_param2=-5;count_param2<6;count_param2++){
			setparam();
			preparefunc();
			data_for_calc->time = 0;
			// Perturb the two sight radii around their defaults and keep
			// max_distance consistent with the larger of the two.
			param_host->sight_distance_cohesion = SIGHT_DISTANCE_COHESION + 0.01*count_param1;
			param_host->sight_distance_alignment = SIGHT_DISTANCE_ALIGNMENT + 0.01*count_param2;
			if(param_host->sight_distance_cohesion >= param_host->sight_distance_alignment){
				param_host->max_distance = param_host->sight_distance_cohesion;//distance_cohesion
			}else{
				param_host->max_distance = param_host->sight_distance_alignment;
			}
			checkCudaErrors(hipMemcpy(param_device, param_host, sizeof(Paramstruct), hipMemcpyHostToDevice));
			while(data_for_calc->time <= 5100){
				run();
				// Fix: was bitwise '&' on boolean conditions — use logical '&&'.
				if(data_for_calc->time%5000<=100 && data_for_calc->time > 100){
					if(data_for_calc->time%5000==0){
						char file_name[100] = "param0_";
						sprintf(file_name,"search_param_fow%d_%ld.csv",(count_param1+5)*11+(count_param2+5),data_for_calc->time);
						save_fp = fopen(file_name,"w");
					}
					if(save_fp != NULL){
						fprintf(save_fp,"%ld",data_for_calc->time);
						for(int count=0;count<N;count++){
							fprintf(save_fp,",%f,%f,%f",data_for_calc->a[count].x,data_for_calc->a[count].y,data_for_calc->a[count].z);
							fprintf(save_fp,",%f,%f,%f",data_for_calc->b[count].x,data_for_calc->b[count].y,data_for_calc->b[count].z);
						}
						fprintf(save_fp,"\n");
						if(data_for_calc->time%5000==100 && data_for_calc->time > 100){
							fclose(save_fp);
							save_fp = NULL;
						}
					}
				}
				data_for_calc->time += 1;
			}
		}
	}
#endif
	hipStreamDestroy( stream0 );
	hipStreamDestroy( stream1 );
	printf("finish_process\n");
	return 0;
}
| 70c8bcd2731fe498c0c416decac7aae1434fde49.cu | /*
* LargeBoidsSimulator
*
* Yhoichi Mototake
*/
#include <cuda.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <vector_types.h>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include "calc.h"
#include "param.h"
/////////////
/*Variables*/
/////////////////////////////////////
float3 *speed = NULL;
float3 *tmp_speed = NULL;
float4 *tmp_position = NULL;
float3 *original_speed = NULL;
float4 *original_position = NULL;
float4 *tmp_position_for_fill = NULL;
float4 *dptr = NULL;
int* coord = NULL;
int* coord_particle = NULL;
DataStruct* data_for_calc;
Paramstruct* param_host;
Paramstruct* param_device;
cudaStream_t stream0;
cudaStream_t stream1;
/////////////////////////////////////
//////////////////////////////////////////
/*Definitions of inline device functions*/
/////////////////////////////////////////////////////////////////////////////////////
// Element-wise arithmetic helpers for the builtin float3/float4/double3/double4
// vector types. The mixed-precision overloads (e.g. float4 - float4 -> double4)
// exist so the neighbour accumulators in calc_core can run in double precision
// while the per-particle state stays in float.
inline __host__ __device__ float4 operator+(const float4 &a, const float4 &b)
{
	return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
inline __host__ __device__ float3 operator+(const float3 &a, const float3 &b)
{
	return make_float3(a.x + b.x, a.y + b.y,a.z + b.z);
}
inline __host__ __device__ double3 operator+(const double3 &a, const float4 &b)
{
	return make_double3(a.x + (double)b.x, a.y + (double)b.y,a.z + (double)b.z);
}
inline __host__ __device__ double3 operator+(const double3 &a, const double4 &b)
{
	return make_double3(a.x + b.x, a.y + b.y,a.z + b.z);
}
inline __host__ __device__ double3 operator+(const double3 &a, const double3 &b)
{
	return make_double3(a.x + b.x, a.y + b.y,a.z + b.z);
}
// NOTE: widens to double4 — used by the separation-force accumulation.
inline __host__ __device__ double4 operator-(const float4 &a, const float4 &b)
{
	return make_double4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
inline __host__ __device__ float3 operator-(const float3 &a, const float3 &b)
{
	return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
inline __host__ __device__ double4 operator+(const double4 &a, const double4 &b)
{
	return make_double4(a.x + b.x, a.y + b.y,a.z + b.z,a.w + b.w);
}
// Scalar broadcast subtraction/addition on every component (including w).
inline __host__ __device__ float4 operator-(const float4 &a, const float &b)
{
	return make_float4(a.x - b, a.y - b, a.z - b, a.w - b);
}
inline __host__ __device__ float4 operator+(const float4 &a, const float &b)
{
	return make_float4(a.x + b, a.y + b, a.z + b, a.w + b);
}
inline __host__ __device__ double4 operator/(const double4 &a, const double &b)
{
	return make_double4(a.x / b, a.y / b, a.z / b, a.w / b);
}
inline __host__ __device__ float3 operator/(const float3 &a, const float &b)
{
	return make_float3(a.x / b, a.y / b, a.z / b);
}
inline __host__ __device__ double3 operator/(const double3 &a, const int &b)
{
	return make_double3(a.x / b, a.y / b, a.z / b);
}
// Scalar * vector scaling.
inline __host__ __device__ float4 operator*(const float &a, const float4 &b)
{
	return make_float4(a * b.x, a * b.y, a * b.z,a * b.w);
}
inline __host__ __device__ float3 operator*(const float &a, const float3 &b)
{
	return make_float3(a * b.x, a * b.y, a * b.z);
}
inline __host__ __device__ double4 operator*(const double &a, const double4 &b)
{
	return make_double4(a * b.x, a * b.y, a * b.z,a * b.w);
}
// Inner products (float4 version includes the w component).
inline __host__ __device__ float dot(float4 a, float4 b)
{
	return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;
}
inline __host__ __device__ float dot(float3 a, float3 b)
{
	return a.x*b.x + a.y*b.y + a.z*b.z;
}
// Narrowing conversions between the builtin vector types.
// cast_float3 drops the w component (when present) and narrows to float;
// cast_float4 widens a float3 and pads w with zero.
inline __host__ __device__ float3 cast_float3(double4 b)
{
	return make_float3((float)b.x, (float)b.y, (float)b.z);
}
inline __host__ __device__ float3 cast_float3(double3 b)
{
	return make_float3((float)b.x, (float)b.y, (float)b.z);
}
inline __host__ __device__ float3 cast_float3(float4 b)
{
	return make_float3(b.x, b.y, b.z);
}
inline __host__ __device__ float4 cast_float4(float3 b)
{
	return make_float4(b.x, b.y, b.z, 0.0f);
}
/////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////
/*Definitions of global device functions*/
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//main calculation function
// Main per-step boids kernel: one thread per particle.
// Grid layout: a 2D launch that must tile width x height exactly -- there is
// no bounds guard on `index`.
// Inputs:  original_position / original_speed -- particle state at step start
//          coord            -- per-lattice-cell list head (particle index or -1)
//          coord_particle   -- per-particle "next" pointer of the cell lists
// Outputs: tmp_position / tmp_speed -- updated state (committed by `reload`)
//          tmp_position_for_fill    -- copy of the pre-step position, used by
//                                      `fill_space` to clear the old cells
// EPS is presumably the integration time-step scale -- TODO confirm.
__global__ void calc_core(Paramstruct* param,float4 *original_position ,float3 *original_speed , float4 *tmp_position,float4 *tmp_position_for_fill, unsigned int width, unsigned int height,float3 *tmp_speed, int *coord,int* coord_particle)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int index = y*width+x;
// This particle's current state.
float4 position;
position = original_position[index];
float3 velocity;
velocity = original_speed[index];
// Broadcast the parameter struct into shared memory.
// NOTE(review): every thread of the block writes get_param with no
// __syncthreads(); benign in practice because all threads write identical
// data, but it is technically a data race.
__shared__ Paramstruct get_param;
get_param = *param;
// Accumulators for the three boids rules.
int neighboursCount;            // neighbours counted by the cohesion rule
neighboursCount = 0;
int neighboursCount_ave;        // neighbours counted by the alignment rule
neighboursCount_ave = 0;
double3 neighboursAvgSpeed;     // sum (averaged later) of neighbour velocities
neighboursAvgSpeed = make_double3(0, 0, 0);
double4 neighboursAvgPosition;  // sum (averaged later) of neighbour positions
neighboursAvgPosition = make_double4(0, 0, 0, 0);
double3 separationForce;        // accumulated repulsion from too-close neighbours
separationForce = make_double3(0, 0, 0);
float4 p;                       // candidate neighbour's position
double d;                       // distance to the candidate neighbour
// Nd: lattice cells per axis. max_mesh_index: how many cells in each
// direction must be scanned to cover the perception radius (ceil via +0.999).
int Nd = 2*get_param.field_size*(int)(1/get_param.mesh_size);
int max_mesh_index = (int)(get_param.max_distance/get_param.mesh_size+0.999);
// Visit the cube of lattice cells surrounding this particle's cell.
for(int count_x=-max_mesh_index;count_x<=max_mesh_index;count_x++){
for(int count_y=-max_mesh_index;count_y<=max_mesh_index;count_y++){
for(int count_z=-max_mesh_index;count_z<=max_mesh_index;count_z++){
// Integer cell coordinates of the visited cell.
int z_index = (int)((1/get_param.mesh_size)*(position.z+get_param.field_size)) + count_z;
int y_index = (int)((1/get_param.mesh_size)*(position.y+get_param.field_size)) + count_y;
int x_index = (int)((1/get_param.mesh_size)*(position.x+get_param.field_size)) + count_x;
// *_flag remembers on which side a periodic wrap happened so that the
// neighbour's position can be shifted by a full field width below.
int x_flag = 0;
int y_flag = 0;
int z_flag = 0;
if (z_index < 0){
z_index = Nd + z_index;
z_flag = -1;
}
if(z_index >= Nd){
z_index = z_index - Nd;
z_flag = 1;
}
if(y_index < 0){
y_index = Nd + y_index;
y_flag = -1;
}
if(y_index >= Nd){
y_index = y_index - Nd;
y_flag = 1;
}
if(x_index < 0){
x_index = Nd + x_index;
x_flag = -1;
}
if(x_index >= Nd){
x_index = x_index - Nd;
x_flag = 1;
}
// Flattened cell index; coord[] holds the head of that cell's particle list.
int coord_index = (z_index*Nd*Nd + y_index*Nd + x_index);
int tmp_index;
tmp_index = coord[coord_index];
// `count` is only a loop sentinel: it is set to -1 to terminate the
// traversal of the cell's linked list.
int count = 0;
while(count!=-1){
//if(tmp_index == index) printf("1+");
if(tmp_index!=-1){
p = original_position[tmp_index];
#if PERIODIC == 1
// Shift the neighbour into this particle's frame when the cell was
// reached through a periodic boundary.
if (z_flag == 1){
p.z = p.z + 2*FIELD_SIZE;
}
if(z_flag == -1){
p.z = p.z - 2*FIELD_SIZE;
}
if(y_flag == 1){
p.y = p.y + 2*FIELD_SIZE;
}
if(y_flag == -1){
p.y = p.y - 2*FIELD_SIZE;
}
if(x_flag == 1){
p.x = p.x + 2*FIELD_SIZE;
}
if(x_flag == -1){
p.x = p.x - 2*FIELD_SIZE;
}
#endif
// k2 = cosine of the angle between this particle's heading and the
// direction towards the neighbour (dot product over the norms).
// For the particle itself the direction vector is zero, so k2 is NaN
// and every comparison below fails -- self is skipped implicitly.
float k2 = ((p.x-position.x)*velocity.x + (p.y-position.y)*velocity.y + (p.z-position.z)*velocity.z)/sqrt((p.x-position.x)*(p.x-position.x)+(p.y-position.y)*(p.y-position.y)+(p.z-position.z)*(p.z-position.z))/sqrt(velocity.x*velocity.x+velocity.y*velocity.y+velocity.z*velocity.z);
if(k2 > get_param.max_angle){ //when tmp_index = index: k2 = nan that's why no problem
// Cheap per-axis rejection before the full Euclidean distance.
float dx = abs(position.x - p.x);
if (dx < get_param.max_distance){
float dy = abs(position.y - p.y);
if (dy < get_param.max_distance){
float dz = abs(position.z - p.z);
if (dz < get_param.max_distance){
d = sqrt(dx*dx + dy*dy + dz*dz);
if (d < get_param.max_distance){
//alignment
if(d < get_param.sight_distance_alignment && k2 > get_param.sight_angle_alignment){
neighboursCount_ave++;
neighboursAvgSpeed = neighboursAvgSpeed + make_double3(original_speed[tmp_index].x,original_speed[tmp_index].y,original_speed[tmp_index].z);
}
//cohesion
if (d < get_param.sight_distance_cohesion && k2 > get_param.sight_angle_cohesion){
if(d>0){
neighboursCount++;
neighboursAvgPosition = neighboursAvgPosition + make_double4(p.x,p.y,p.z,p.w);
}else{
printf("position of two particles are completely matched, there are probability of bug existing.\n");
}
}
//separation
if (d < get_param.min_distance && k2 > get_param.sight_angle_separation) {
if(d>0){
// Unit vector away from the neighbour, accumulated.
separationForce = separationForce + ((position - p))/d;
}else{
printf("position of two particles are completely matched, there are probability of bug existing.\n");
}
}
}
}
}
}
count += 1;
}
// Advance to the next particle in this cell's linked list.
tmp_index = coord_particle[tmp_index];
if(tmp_index==-1){
count = -1;
}
}else{
// Empty cell / end of list: terminate the traversal.
count = -1;
}
}
}
}
}
// Apply the three steering forces, scaled by their weights and the step size.
float tmp_coeff = EPS*get_param.min_distance*get_param.w_min_distance;
velocity = velocity + tmp_coeff*(cast_float3(separationForce));
if(neighboursCount_ave > 0){
// Alignment: steer towards the average neighbour velocity.
neighboursAvgSpeed = neighboursAvgSpeed / neighboursCount_ave;
velocity = velocity + EPS*get_param.w_neighbour_speed*(cast_float3(neighboursAvgSpeed) - velocity);
}
if(neighboursCount > 0){
// Cohesion: steer towards the neighbours' centre of mass.
neighboursAvgPosition = neighboursAvgPosition / neighboursCount;
float3 coh_v;
coh_v = cast_float3(neighboursAvgPosition) - cast_float3(position);
//float norm = 1;//sqrt(coh_x*coh_x+coh_y*coh_y+coh_z*coh_z);
velocity = velocity + EPS*get_param.w_neighbour_distance*coh_v;///norm;
}
#if NOISE == 1
// Per-step random perturbation. NOTE(review): curand_init with a fixed
// seed is re-run every step, so the "noise" is identical each step --
// confirm whether that is intended.
curandState s;
curand_init(10, index, 0, &s);
float3 noise_rand = make_float3(2*(curand_uniform(&s)-0.5),2*(curand_uniform(&s)-0.5),2*(curand_uniform(&s)-0.5));
velocity = velocity + EPS*get_param.w_noise* get_param.max_speed * noise_rand;
#endif
// Clamp the speed into [min_speed, max_speed] while keeping the direction.
float check_speed = sqrt(velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z);
if(check_speed > get_param.max_speed){
float norm = 1/(get_param.max_speed)*check_speed;
velocity = velocity / norm;
}else if(check_speed < get_param.min_speed){
float norm = 1/(get_param.min_speed)*check_speed;
velocity = velocity / norm;
}
// Explicit Euler position update.
position = position + EPS * cast_float4(velocity);
float scale = (float)get_param.field_size;
#if PERIODIC == 1
// Periodic boundary: wrap positions into [-scale, scale).
if (position.x >= scale) {
position.x = -2*scale + position.x;
}else if (position.x < -scale) {
position.x = 2*scale + position.x;
}
if (position.y >= scale) {
position.y = -2*scale + position.y;
}else if (position.y < -scale) {
position.y = 2*scale + position.y;
}
if (position.z >= scale) {
position.z = -2*scale + position.z;
}else if (position.z < -scale) {
position.z = 2*scale + position.z;
}
#else
// Reflecting boundary: bounce off the walls, inverting the velocity
// component normal to the wall.
// NOTE(review): velocity is mutated after tmp_speed would capture it only
// below, so the reflected velocity is stored -- but position and velocity
// updates are interleaved with care; kept byte-identical.
if (position.x >= scale) {
velocity.x = -velocity.x;
position.x = 2*scale - position.x;
}else if (position.x < -scale) {
velocity.x = -velocity.x;
position.x = -2*scale - position.x;
}
if (position.y >= scale) {
velocity.y = -velocity.y;
position.y = 2*scale - position.y;
}else if (position.y < -scale) {
velocity.y = -velocity.y;
position.y = -2*scale - position.y;
}
if (position.z >= scale) {
velocity.z = -velocity.z;
position.z = 2*scale - position.z;
}else if (position.z < -scale) {
velocity.z = -velocity.z;
position.z = -2*scale - position.z;
}
#endif
// Stage results; `reload` commits them, `fill_space` uses the old position.
tmp_position_for_fill[index] = original_position[index];
tmp_position[index] = position;
tmp_speed[index] = velocity;
}
//reload original variables
// Commit the staged step results: copy the tmp_* buffers back into the
// authoritative position/speed arrays. One thread per particle; the launch
// grid is expected to tile width x height exactly (no bounds guard).
__global__ void reload(float4 *original_position ,float3 *original_speed , float4 *tmp_position, unsigned int width, unsigned int height,float3 *tmp_speed)
{
    const unsigned int col = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int particle = row*width + col;
    original_position[particle] = tmp_position[particle];
    original_speed[particle]    = tmp_speed[particle];
}
//sent data for
// Publish the current state to the display/output buffers (`pos`, `speed`),
// typically host-mapped memory. One thread per particle; the launch grid is
// expected to tile width x height exactly (no bounds guard).
__global__ void sent_data(float4 *pos,float4 *tmp_position, unsigned int width, unsigned int height,float3 *tmp_speed,float3 *speed)
{
    const unsigned int col = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int particle = row*width + col;
    pos[particle]   = tmp_position[particle];
    speed[particle] = tmp_speed[particle];
}
//set initial value of position and speed
// Initialise one particle per thread with a random position (uniform in a
// cube of half-width INIT_BOIDS_SIZE) and a small random velocity.
// `param` and `height` are kept for signature compatibility but are not
// needed by the initialisation.
// Fixes vs. original: removed the dead u/v texture-coordinate computation
// and the __syncthreads() that guarded nothing (no shared memory is used).
__global__ void prepare_calc(Paramstruct* param,float4 *original_position, unsigned int width, unsigned int height, float3 *original_speed)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int index = y*width+x;
    // Fixed seed (width) + unique subsequence per particle: reproducible,
    // per-particle-independent random streams.
    curandState s;
    curand_init(width, index, 0, &s);
    // curand_uniform() is in (0,1]; 2*(u-0.5) maps it onto (-1,1].
    original_position[index] = make_float4(INIT_BOIDS_SIZE*(2*(curand_uniform(&s)-0.5)), INIT_BOIDS_SIZE*(2*(curand_uniform(&s)-0.5)), INIT_BOIDS_SIZE*(2*(curand_uniform(&s)-0.5)),1.0f);
    original_speed[index] = make_float3(0.05*2*(curand_uniform(&s)-0.5),0.05*2*(curand_uniform(&s)-0.5),0.05*2*(curand_uniform(&s)-0.5));
}
//fill -1 in lattice array
// Clear the lattice cell each particle occupied on the previous step by
// resetting that cell's list head to -1 (empty). `coord_particle` is unused
// but kept for signature compatibility with the other lattice kernels.
__global__ void fill_space(float4 *tmp_position_for_fill, unsigned int width, unsigned int height, int *coord,int* coord_particle)
{
    const unsigned int gx = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int gy = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int particle = gy*width + gx;
    // Cells per axis of the uniform grid covering [-FIELD_SIZE, FIELD_SIZE].
    const int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE);
    // Integer cell coordinates of this particle's previous position.
    const float4 prev = tmp_position_for_fill[particle];
    const int cx = (int)((1/MESH_SIZE)*(prev.x+FIELD_SIZE));
    const int cy = (int)((1/MESH_SIZE)*(prev.y+FIELD_SIZE));
    const int cz = (int)((1/MESH_SIZE)*(prev.z+FIELD_SIZE));
    coord[cz*Nd*Nd + cy*Nd + cx] = -1;
}
//regist particle id for lattice array
// Register every particle in the uniform-grid lattice.
// coord[cell] holds the index of the most recently inserted particle of that
// cell (-1 = empty); coord_particle[i] is particle i's "next" pointer, so
// each cell forms a singly linked list traversed by calc_core.
// Fix vs. original: the insertion used a non-atomic read of the list head
// followed by an atomicCAS retry loop (plus a redundant special case for the
// empty cell). A single atomicExch prepends the particle and returns the
// previous head (-1 for an empty cell) in one atomic step, with identical
// resulting list structure and no spinning under contention.
__global__ void divide_space(float4 *original_position, unsigned int width, unsigned int height, int *coord,int* coord_particle)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int index = y*width+x;
    // Flattened lattice cell index of this particle's current position.
    int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE);
    int coord_index = ((int)((1/MESH_SIZE)*(original_position[index].z+FIELD_SIZE)))*Nd*Nd + ((int)((1/MESH_SIZE)*(original_position[index].y+FIELD_SIZE)))*Nd + ((int)((1/MESH_SIZE)*(original_position[index].x+FIELD_SIZE)));
    // Become the new head; the old head becomes our successor.
    coord_particle[index] = atomicExch(&coord[coord_index], index);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// One-time device-side initialisation: seed both particle-state buffer sets
// with the same RNG-driven initial conditions, then build the spatial
// lattice from the `original_*` buffers.
// NOTE(review): `pos` and `speed` are unused here -- confirm intent.
// Assumes mesh_width is a multiple of 32 and mesh_height is a multiple of
// the chosen block height (the kernels have no bounds guard).
void prepare(float4 *pos, unsigned int mesh_width,
unsigned int mesh_height, float3 *speed)
{
// execute the kernel
int block_y;
if(mesh_height >= 32){
block_y = 32;
}else{
block_y = 1;
}
dim3 block(32, block_y, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
// Initialise both the staging (tmp_*) and the authoritative (original_*)
// buffers; both launches draw from equally-seeded RNG streams.
prepare_calc<<< grid, block,0,stream0>>>(param_device,tmp_position, mesh_width, mesh_height, tmp_speed);
prepare_calc<<< grid, block,0,stream0>>>(param_device,original_position, mesh_width, mesh_height, original_speed);
//zero fill
// Lattice side length. NOTE(review): the +1 here matches the allocation in
// malloc_val but the device kernels compute Nd WITHOUT the +1 -- confirm
// which extent is intended.
int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE)+1;
cudaStreamSynchronize(stream0);
// Mark every lattice cell empty (-1) before registering particles.
thrust::device_ptr<int> coord_thrust(coord);
thrust::fill(coord_thrust,coord_thrust+Nd*Nd*Nd,-1);
divide_space<<< grid, block,0,stream0>>>(original_position, mesh_width, mesh_height, coord,coord_particle);
}
// Populate the host-side parameter struct from the compile-time tuning
// macros and push a copy to the device-resident Paramstruct.
void setparam(){
    param_host = (Paramstruct*)malloc(sizeof(Paramstruct));

    // The neighbour search only needs to reach as far as the larger of the
    // two perception radii.
    param_host->max_distance = (SIGHT_DISTANCE_COHESION >= SIGHT_DISTANCE_ALIGNMENT)
        ? SIGHT_DISTANCE_COHESION
        : SIGHT_DISTANCE_ALIGNMENT;
    //printf("max_distance=%f\n",param_host->max_distance);

    // Per-rule perception distances.
    param_host->sight_distance_cohesion = SIGHT_DISTANCE_COHESION;
    param_host->sight_distance_alignment = SIGHT_DISTANCE_ALIGNMENT;
    param_host->min_distance = MIN_DISTANCE;

    // max_angle is the smallest (loosest) of the three cosine thresholds; it
    // serves as the cheap early rejection in calc_core before the per-rule
    // angle checks.
    double widest = SIGHT_ANGLE_COHESION;
    if (SIGHT_ANGLE_ALIGNMENT < widest) widest = SIGHT_ANGLE_ALIGNMENT;
    if (SIGHT_ANGLE_SEPARATION < widest) widest = SIGHT_ANGLE_SEPARATION;
    param_host->max_angle = widest;
    //printf("max_dangle=%f\n",param_host->max_angle);

    // Per-rule view-angle (cosine) thresholds.
    param_host->sight_angle_separation = SIGHT_ANGLE_SEPARATION;
    param_host->sight_angle_alignment = SIGHT_ANGLE_ALIGNMENT;
    param_host->sight_angle_cohesion = SIGHT_ANGLE_COHESION;

    // Rule weights and speed/field bounds.
    param_host->w_neighbour_speed = W_NEIGHBOUR_SPEED;
    param_host->w_neighbour_distance = W_NEIGHBOUR_DISTANCE;
    param_host->w_min_distance = W_MIN_DISTANCE;
    param_host->w_noise = W_NOISE;
    param_host->min_speed = MIN_SPEED;
    param_host->max_speed = MAX_SPEED;
    param_host->field_size = FIELD_SIZE;
    param_host->mesh_size = MESH_SIZE;

    checkCudaErrors(cudaMemcpy(param_device, param_host, sizeof(Paramstruct), cudaMemcpyHostToDevice));
}
// Advance the simulation one step on stream0: run the boids kernel into the
// tmp buffers, commit them, update the spatial lattice incrementally, then
// publish positions/velocities on stream1 into the host-mapped buffers.
// Assumes mesh_width is a multiple of 32 and mesh_height a multiple of the
// chosen block height (the kernels have no bounds guard).
// NOTE: the incoming `pos` and `speed` values are ignored -- both are
// overwritten locally with device pointers derived from the host-mapped
// data_for_calc buffers.
void launch_calc(float4 *pos, unsigned int mesh_width,
unsigned int mesh_height, float3 *speed)
{
    int block_y;
    if(mesh_height >= 32){
        block_y = 32;
    }else{
        block_y = 1;
    }
    dim3 block(32, block_y, 1);
    dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
    cudaStreamSynchronize(stream0);
    calc_core<<< grid, block,0,stream0>>>(param_device, original_position ,original_speed ,tmp_position,tmp_position_for_fill, mesh_width, mesh_height ,tmp_speed, coord,coord_particle);
    cudaStreamSynchronize(stream0);
    reload<<< grid, block,0,stream0>>>(original_position ,original_speed , tmp_position, mesh_width, mesh_height,tmp_speed);
#if DEBUG == 1
    // Debug-only: wipe the whole lattice instead of relying on fill_space's
    // incremental clearing.
    // Fixes vs. original: `Nd` was undeclared in this function, and only
    // Nd*Nd of the Nd^3 lattice cells were cleared. Nd matches the
    // allocation in malloc_val (including the +1 padding).
    int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE)+1;
    thrust::device_ptr<int> coord_thrust(coord);
    thrust::fill(coord_thrust,coord_thrust+Nd*Nd*Nd,-1);
#endif
    // Clear the cells occupied last step, then re-register every particle.
    fill_space<<< grid, block,0,stream0>>>(tmp_position_for_fill, mesh_width, mesh_height, coord,coord_particle);
    cudaStreamSynchronize(stream0);
    divide_space<<< grid, block,0,stream0>>>(original_position, mesh_width, mesh_height, coord,coord_particle);
    cudaStreamSynchronize(stream0);
    // Resolve the device views of the zero-copy (host-mapped) output buffers.
    cudaHostGetDevicePointer( &pos, data_for_calc->a, 0 );
    cudaHostGetDevicePointer( &speed, data_for_calc->b, 0 );
    // Publish on stream1 so the copy can overlap the next step's kernels.
    sent_data<<< grid, block,0,stream1>>>(pos,original_position, mesh_width, mesh_height ,tmp_speed,speed);
}
// Allocate every device-side simulation buffer plus the two CUDA streams.
// `argc`/`argv`/`ref_file` are accepted for signature compatibility but are
// unused. Returns true on success; allocation/stream failures are reported
// (and aborted) by checkCudaErrors.
// Fixes vs. original: repaired the HTML-entity-mangled `&param_device`
// argument, and replaced the ignored cudaStreamCreate results (an unused
// `result` variable) with checked calls.
bool malloc_val(int argc, char **argv, char *ref_file)
{
    // Lattice cells per axis; the +1 pads the topmost cell produced by
    // integer truncation at the upper field boundary.
    int Nd = 2*FIELD_SIZE*(int)(1/MESH_SIZE)+1;
    checkCudaErrors(cudaMalloc((void **)&param_device, sizeof(Paramstruct)));
    checkCudaErrors(cudaMalloc((void **)&original_speed, mesh_width*mesh_height*sizeof(float3)));
    checkCudaErrors(cudaMalloc((void **)&original_position, mesh_width*mesh_height*sizeof(float4)));
    checkCudaErrors(cudaMalloc((void **)&tmp_speed, mesh_width*mesh_height*sizeof(float3)));
    checkCudaErrors(cudaMalloc((void **)&tmp_position, mesh_width*mesh_height*sizeof(float4)));
    checkCudaErrors(cudaMalloc((void **)&tmp_position_for_fill, mesh_width*mesh_height*sizeof(float4)));
    checkCudaErrors(cudaMalloc((void **)&coord_particle, mesh_width*mesh_height*sizeof(int)));
    checkCudaErrors(cudaMalloc((void **)&coord, Nd*Nd*Nd*sizeof(int)));
    // stream0 runs the simulation kernels; stream1 streams results out.
    checkCudaErrors(cudaStreamCreate(&stream1));
    checkCudaErrors(cudaStreamCreate(&stream0));
    return true;
}
// Advance the simulation one frame. The buffer pointer passed here is NULL
// because launch_calc re-derives the device pointers from the host-mapped
// buffers itself; mesh_width/mesh_height/speed are file-scope globals --
// TODO confirm.
void run()
{
// map OpenGL buffer object for writing from CUDA
float4 *dptr = NULL;
launch_calc(dptr, mesh_width, mesh_height, speed);
}
// One-time initialisation wrapper around prepare(). `dptr`, `mesh_width`,
// `mesh_height` and `speed` appear to be file-scope globals -- TODO confirm
// (dptr is not declared locally here).
void preparefunc()
{
// map OpenGL buffer object for writing from CUDA
prepare(dptr, mesh_width, mesh_height, speed);
}
// Worker-thread entry point (pthread-style signature): sets up device
// buffers and parameters, then runs the simulation loop, periodically
// dumping particle state to CSV files.
// pvoidData carries a DataStruct with argc/argv, the shared host-mapped
// output buffers (a = positions, b = velocities) and the step counter.
// NOTE(review): fopen results are never checked, and in SEARCH==1 mode
// setparam() re-mallocs param_host every parameter combination without
// freeing the previous one (small leak) -- confirm acceptable.
void* routine( void *pvoidData ){
printf("calc start\n");
data_for_calc = (DataStruct*)pvoidData;
char *ref_file = NULL;
int *argc = data_for_calc->pArgc;
char **argv = data_for_calc->pArgv;
malloc_val(argc[0], argv, ref_file);
setparam();
preparefunc();
FILE* save_fp;
#if SEARCH == 0
// Endless-run mode: step forever, and for the first 101 steps of every
// 1000-step window append a snapshot of all N particles to a CSV file.
int n = 0; //dummy variable to erase the "unreachable" warning
while(n != EOF){
run();
if(data_for_calc->time%1000<=100){
if(data_for_calc->time%1000==0){
// Open a new per-window output file at the start of each window.
char file_name[100] = "param0_";
sprintf(file_name,"param%d_%ld.csv",PARAM_NUM,data_for_calc->time);
save_fp = fopen(file_name,"w");
}
// One CSV row per step: time, then x,y,z and vx,vy,vz for every particle.
fprintf(save_fp,"%ld",data_for_calc->time);
for(int count=0;count<N;count++){
fprintf(save_fp,",%f,%f,%f",data_for_calc->a[count].x,data_for_calc->a[count].y,data_for_calc->a[count].z);
fprintf(save_fp,",%f,%f,%f",data_for_calc->b[count].x,data_for_calc->b[count].y,data_for_calc->b[count].z);
}
fprintf(save_fp,"\n");
if(data_for_calc->time%1000==100){
fclose(save_fp);
}
}
data_for_calc->time += 1;
}
#elif SEARCH == 1
// Parameter-sweep mode: an 11x11 grid of perturbations around the two
// sight-distance parameters, each combination simulated for 5100 steps.
for(int count_param1=-5;count_param1<6;count_param1++){
printf("count_param1=%d\n",count_param1);
for(int count_param2=-5;count_param2<6;count_param2++){
// Reset device state, then override the two swept parameters.
setparam();
preparefunc();
data_for_calc->time = 0;
param_host->sight_distance_cohesion = SIGHT_DISTANCE_COHESION + 0.01*count_param1;
param_host->sight_distance_alignment = SIGHT_DISTANCE_ALIGNMENT + 0.01*count_param2;
if(param_host->sight_distance_cohesion >= param_host->sight_distance_alignment){
param_host->max_distance = param_host->sight_distance_cohesion;//distance_cohesion
}else{
param_host->max_distance = param_host->sight_distance_alignment;
}
checkCudaErrors(cudaMemcpy(param_device, param_host, sizeof(Paramstruct), cudaMemcpyHostToDevice));
while(data_for_calc->time <= 5100){
/*if(data_for_calc->time == 7000){
param_host->w_noise = 0.1;
checkCudaErrors(cudaMemcpy(param_device, param_host, sizeof(Paramstruct), cudaMemcpyHostToDevice));
}else if(data_for_calc->time == 7005){
param_host->w_noise = W_NOISE;
checkCudaErrors(cudaMemcpy(param_device, param_host, sizeof(Paramstruct), cudaMemcpyHostToDevice));
}*/
run();
// Record steps 5001..5100 of each combination.
// NOTE(review): `&` is used as a logical AND here; it works because
// both operands are 0/1, but `&&` is presumably what was meant.
if(data_for_calc->time%5000<=100 & data_for_calc->time > 100){
if(data_for_calc->time%5000==0){
char file_name[100] = "param0_";
sprintf(file_name,"search_param_fow%d_%ld.csv",(count_param1+5)*11+(count_param2+5),data_for_calc->time);
save_fp = fopen(file_name,"w");
}
fprintf(save_fp,"%ld",data_for_calc->time);
for(int count=0;count<N;count++){
fprintf(save_fp,",%f,%f,%f",data_for_calc->a[count].x,data_for_calc->a[count].y,data_for_calc->a[count].z);
fprintf(save_fp,",%f,%f,%f",data_for_calc->b[count].x,data_for_calc->b[count].y,data_for_calc->b[count].z);
}
fprintf(save_fp,"\n");
if(data_for_calc->time%5000==100 & data_for_calc->time > 100){
fclose(save_fp);
}
}
data_for_calc->time += 1;
//printf("time=%ld\n",data_for_calc->time);
}
}
}
#endif
cudaStreamDestroy( stream0 );
cudaStreamDestroy( stream1 );
printf("finish_process\n");
return 0;
}
|
03327db854240dd0160d44bf63e115b44c4dbc99.hip | // !!! This is a file automatically generated by hipify!!!
//hello.cu
// Minimal HIP "hello world" (hipify-translated): prints a greeting from the
// host; no device kernels are launched.
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
int main(void) {
printf("Hello CUDA \n");
return 0;
}
| 03327db854240dd0160d44bf63e115b44c4dbc99.cu | //hello.cu
// Minimal CUDA "hello world": prints a greeting from the host; no device
// kernels are launched.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
int main(void) {
printf("Hello CUDA \n");
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.